/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and the checking of protocol
 *	handlers MUST start from the promiscuous ptype_all chain in net_bh.
 *	This is true now; do not change it.
 *	Explanation: if a packet-mangling protocol handler were first on
 *	the list, it could not sense that the packet is cloned and should
 *	be copied-on-write, so it would change the packet in place and
 *	subsequent readers would see a broken packet.
 *	--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
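
/*
 * Illustrative sketch, not part of the original file: how a protocol
 * module might use dev_add_pack()/dev_remove_pack().  The handler and
 * packet_type below (example_rcv, example_ptype) are hypothetical names.
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* A real handler would inspect skb here; this sketch just drops it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_ptype __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),	/* have IPv4 frames delivered here */
	.func = example_rcv,
};

/* A module would call dev_add_pack(&example_ptype) in its init path and
 * dev_remove_pack(&example_ptype) on exit, after which the structure may
 * be freed. */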

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
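
/*
 * Worked example (illustrative, not from the original file): a kernel
 * command line of
 *
 *	netdev=9,0x300,eth0
 *
 * is parsed above into two integers followed by the name "eth0", so the
 * stored entry for eth0 carries irq 9 and base_addr 0x300, while
 * mem_start and mem_end remain 0.
 */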

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
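
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller in process context looks a device up by name and must balance
 * the reference taken by dev_get_by_name() with dev_put().
 */
static int example_use_by_name(struct net *net, const char *name)
{
	struct net_device *dev = dev_get_by_name(net, name);

	if (!dev)
		return -ENODEV;
	/* ... the device may be safely dereferenced here ... */
	dev_put(dev);	/* release the reference taken above */
	return 0;
}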

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
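
/*
 * Illustrative sketch (hypothetical helper): the _rcu lookup takes no
 * reference, so the result is only valid inside the RCU read section;
 * copy out whatever is needed before unlocking.
 */
static int example_ifindex_to_name(struct net *net, int ifindex,
				   char name[IFNAMSIZ])
{
	struct net_device *dev;
	int err = -ENODEV;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev) {
		strlcpy(name, dev->name, IFNAMSIZ);
		err = 0;
	}
	rcu_read_unlock();
	return err;
}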


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);
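
/*
 * Illustrative sketch (hypothetical): find any interface that is up but
 * not a loopback.  The mask selects which flag bits must match if_flags
 * exactly; the caller owns the reference on a non-NULL result.
 */
static struct net_device *example_first_up(struct net *net)
{
	return dev_get_by_flags(net, IFF_UP, IFF_UP | IFF_LOOPBACK);
}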

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be exactly one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
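
/*
 * Illustrative sketch (hypothetical driver fragment): asking for the
 * first free "foo%d" slot before registration.
 */
static int example_name_device(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "foo%d");

	if (unit < 0)
		return unit;	/* -EINVAL, -ENFILE, ... */
	/* dev->name now holds e.g. "foo0"; unit is the number chosen */
	return 0;
}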

static int dev_get_valid_name(struct net *net, const char *name, char *buf,
			      bool fmt)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return __dev_alloc_name(net, name, buf);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (buf != name)
		strlcpy(buf, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device; format strings such as "eth%d"
 *	can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, newname, dev->name, 1);
	if (err < 0)
		return err;

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net_eq(net, &init_net)) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from @alias
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);
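
/*
 * Illustrative sketch (hypothetical ioctl-style path): try to autoload
 * a module providing the interface before looking the name up again.
 */
static struct net_device *example_find_or_load(struct net *net,
					       const char *name)
{
	struct net_device *dev = dev_get_by_name(net, name);

	if (!dev) {
		dev_load(net, name);	/* may request_module() */
		dev = dev_get_by_name(net, name);
	}
	return dev;	/* caller must dev_put() a non-NULL result */
}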

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for it while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can even be on a different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only called if the device is UP.
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 *	Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);
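
/*
 * Illustrative sketch (hypothetical): both dev_open() and dev_close()
 * assume the RTNL semaphore is held, so an administrative bounce of an
 * interface looks like this.
 */
static int example_bounce_interface(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_close(dev);		/* nop when already down */
	if (!err)
		err = dev_open(dev);	/* nop when already IFF_UP */
	rtnl_unlock();
	return err;
}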


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to give it a race-free view
 *	of the network device list.
 */
1334
1335int register_netdevice_notifier(struct notifier_block *nb)
1336{
1337 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001338 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001339 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 int err;
1341
1342 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001343 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001344 if (err)
1345 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001346 if (dev_boot_phase)
1347 goto unlock;
1348 for_each_net(net) {
1349 for_each_netdev(net, dev) {
1350 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1351 err = notifier_to_errno(err);
1352 if (err)
1353 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354
Eric W. Biederman881d9662007-09-17 11:56:21 -07001355 if (!(dev->flags & IFF_UP))
1356 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001357
Eric W. Biederman881d9662007-09-17 11:56:21 -07001358 nb->notifier_call(nb, NETDEV_UP, dev);
1359 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001361
1362unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 rtnl_unlock();
1364 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001365
1366rollback:
1367 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001368 for_each_net(net) {
1369 for_each_netdev(net, dev) {
1370 if (dev == last)
1371 break;
Herbert Xufcc5a032007-07-30 17:03:38 -07001372
Eric W. Biederman881d9662007-09-17 11:56:21 -07001373 if (dev->flags & IFF_UP) {
1374 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1375 nb->notifier_call(nb, NETDEV_DOWN, dev);
1376 }
1377 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00001378 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001379 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001380 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001381
1382 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001383 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001385EXPORT_SYMBOL(register_netdevice_notifier);
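/*
 * Illustrative sketch, not part of the original file: a minimal
 * notifier block that logs NETDEV_UP events.  With this interface
 * the opaque pointer handed to the callback is the net_device
 * itself; all "example_*" names are assumptions.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "example: %s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* In module init: register_netdevice_notifier(&example_netdev_nb);
 * existing devices are replayed as NETDEV_REGISTER/NETDEV_UP events,
 * as described above.
 */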
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
1387/**
1388 * unregister_netdevice_notifier - unregister a network notifier block
1389 * @nb: notifier
1390 *
1391 * Unregister a notifier previously registered by
1392 * register_netdevice_notifier(). The notifier is unlinked from the
1393 * kernel structures and may then be reused. A negative errno code
1394 * is returned on a failure.
1395 */
1396
1397int unregister_netdevice_notifier(struct notifier_block *nb)
1398{
Herbert Xu9f514952006-03-25 01:24:25 -08001399 int err;
1400
1401 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001402 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001403 rtnl_unlock();
1404 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001406EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407
1408/**
1409 * call_netdevice_notifiers - call all network notifier blocks
1410 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001411 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 *
1413 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001414 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 */
1416
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001417int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001419 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420}
1421
1422/* When > 0 there are consumers of rx skb time stamps */
1423static atomic_t netstamp_needed = ATOMIC_INIT(0);
1424
1425void net_enable_timestamp(void)
1426{
1427 atomic_inc(&netstamp_needed);
1428}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001429EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430
1431void net_disable_timestamp(void)
1432{
1433 atomic_dec(&netstamp_needed);
1434}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001435EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001437static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438{
1439 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001440 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001441 else
1442 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443}
1444
Arnd Bergmann44540962009-11-26 06:07:08 +00001445/**
1446 * dev_forward_skb - loopback an skb to another netif
1447 *
1448 * @dev: destination network device
1449 * @skb: buffer to forward
1450 *
1451 * return values:
1452 * NET_RX_SUCCESS (no congestion)
1453 * NET_RX_DROP (packet was dropped)
1454 *
1455 * dev_forward_skb can be used for injecting an skb from the
1456 * start_xmit function of one device into the receive queue
1457 * of another device.
1458 *
1459 * The receiving device may be in another namespace, so
1460 * we have to clear all information in the skb that could
1461 * impact namespace isolation.
1462 */
1463int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1464{
1465 skb_orphan(skb);
1466
1467 if (!(dev->flags & IFF_UP))
1468 return NET_RX_DROP;
1469
1470 if (skb->len > (dev->mtu + dev->hard_header_len))
1471 return NET_RX_DROP;
1472
Arnd Bergmann8a83a002010-01-30 12:23:03 +00001473 skb_set_dev(skb, dev);
Arnd Bergmann44540962009-11-26 06:07:08 +00001474 skb->tstamp.tv64 = 0;
1475 skb->pkt_type = PACKET_HOST;
1476 skb->protocol = eth_type_trans(skb, dev);
Arnd Bergmann44540962009-11-26 06:07:08 +00001477 return netif_rx(skb);
1478}
1479EXPORT_SYMBOL_GPL(dev_forward_skb);
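/*
 * Illustrative sketch, not part of the original file: a veth-style
 * start_xmit that injects every frame into an assumed peer device.
 * "struct example_pair_priv" and its peer pointer are hypothetical
 * and would be set up when the device pair is created.
 */
struct example_pair_priv {
	struct net_device *peer;
};

static netdev_tx_t example_pair_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct example_pair_priv *priv = netdev_priv(dev);

	if (dev_forward_skb(priv->peer, skb) == NET_RX_SUCCESS)
		dev->stats.tx_packets++;
	else
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}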
1480
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481/*
1482 * Support routine. Sends outgoing frames to any network
1483 * taps currently in use.
1484 */
1485
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001486static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487{
1488 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001489
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001490#ifdef CONFIG_NET_CLS_ACT
1491 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1492 net_timestamp(skb);
1493#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001494 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001495#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496
1497 rcu_read_lock();
1498 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1499 /* Never send packets back to the socket
1500 * they originated from - MvS (miquels@drinkel.ow.org)
1501 */
1502 if ((ptype->dev == dev || !ptype->dev) &&
1503 (ptype->af_packet_priv == NULL ||
1504 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001505 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 if (!skb2)
1507 break;
1508
1509 /* skb->nh should be correctly
1510 set by the sender, so that the second statement is
1511 just protection against buggy protocols.
1512 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001513 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001515 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001516 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 if (net_ratelimit())
1518 printk(KERN_CRIT "protocol %04x is "
1519 "buggy, dev %s\n",
1520 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001521 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 }
1523
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001524 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001526 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 }
1528 }
1529 rcu_read_unlock();
1530}
1531
Denis Vlasenko56079432006-03-29 15:57:29 -08001532
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001533static inline void __netif_reschedule(struct Qdisc *q)
1534{
1535 struct softnet_data *sd;
1536 unsigned long flags;
1537
1538 local_irq_save(flags);
1539 sd = &__get_cpu_var(softnet_data);
1540 q->next_sched = sd->output_queue;
1541 sd->output_queue = q;
1542 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1543 local_irq_restore(flags);
1544}
1545
David S. Miller37437bb2008-07-16 02:15:04 -07001546void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001547{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001548 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1549 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001550}
1551EXPORT_SYMBOL(__netif_schedule);
1552
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001553void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001554{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001555 if (atomic_dec_and_test(&skb->users)) {
1556 struct softnet_data *sd;
1557 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001558
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001559 local_irq_save(flags);
1560 sd = &__get_cpu_var(softnet_data);
1561 skb->next = sd->completion_queue;
1562 sd->completion_queue = skb;
1563 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1564 local_irq_restore(flags);
1565 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001566}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001567EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001568
1569void dev_kfree_skb_any(struct sk_buff *skb)
1570{
1571 if (in_irq() || irqs_disabled())
1572 dev_kfree_skb_irq(skb);
1573 else
1574 dev_kfree_skb(skb);
1575}
1576EXPORT_SYMBOL(dev_kfree_skb_any);
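/*
 * Illustrative sketch, not part of the original file: a TX ring
 * cleanup that may be reached from timer, irq or process context,
 * so it uses the context-agnostic variant above.  The ring layout
 * is an assumption for the example.
 */
static void example_clean_tx_ring(struct sk_buff **ring, int entries)
{
	int i;

	for (i = 0; i < entries; i++) {
		if (ring[i]) {
			dev_kfree_skb_any(ring[i]);	/* safe in any context */
			ring[i] = NULL;
		}
	}
}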
1577
1578
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001579/**
1580 * netif_device_detach - mark device as removed
1581 * @dev: network device
1582 *
1583 * Mark device as removed from the system and therefore no longer available.
1584 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001585void netif_device_detach(struct net_device *dev)
1586{
1587 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1588 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001589 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001590 }
1591}
1592EXPORT_SYMBOL(netif_device_detach);
1593
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001594/**
1595 * netif_device_attach - mark device as attached
1596 * @dev: network device
1597 *
1598 * Mark device as attached to the system and restart it if needed.
1599 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001600void netif_device_attach(struct net_device *dev)
1601{
1602 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1603 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001604 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001605 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001606 }
1607}
1608EXPORT_SYMBOL(netif_device_attach);
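/*
 * Illustrative sketch, not part of the original file: the usual
 * suspend/resume pairing for a PCI network driver.  The hardware
 * quiesce/restore steps are elided; function names are assumptions.
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stops all TX queues if running */
	/* ... quiesce the hardware and save its state ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... restore the hardware state ... */
	netif_device_attach(dev);	/* wakes queues and the watchdog */
	return 0;
}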
1609
Ben Hutchings6de329e2008-06-16 17:02:28 -07001610static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1611{
1612 return ((features & NETIF_F_GEN_CSUM) ||
1613 ((features & NETIF_F_IP_CSUM) &&
1614 protocol == htons(ETH_P_IP)) ||
1615 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001616 protocol == htons(ETH_P_IPV6)) ||
1617 ((features & NETIF_F_FCOE_CRC) &&
1618 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001619}
1620
1621static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1622{
1623 if (can_checksum_protocol(dev->features, skb->protocol))
1624 return true;
1625
1626 if (skb->protocol == htons(ETH_P_8021Q)) {
1627 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1628 if (can_checksum_protocol(dev->features & dev->vlan_features,
1629 veh->h_vlan_encapsulated_proto))
1630 return true;
1631 }
1632
1633 return false;
1634}
Denis Vlasenko56079432006-03-29 15:57:29 -08001635
Arnd Bergmann8a83a002010-01-30 12:23:03 +00001636/**
1637 * skb_set_dev - assign a new device to a buffer
1638 * @skb: buffer for the new device
1639 * @dev: network device
1640 *
1641 * If an skb is owned by a device already, we have to reset
1642 * all data private to the namespace the device belongs to
1643 * before assigning it a new device.
1644 */
1645#ifdef CONFIG_NET_NS
1646void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1647{
1648 skb_dst_drop(skb);
1649 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1650 secpath_reset(skb);
1651 nf_reset(skb);
1652 skb_init_secmark(skb);
1653 skb->mark = 0;
1654 skb->priority = 0;
1655 skb->nf_trace = 0;
1656 skb->ipvs_property = 0;
1657#ifdef CONFIG_NET_SCHED
1658 skb->tc_index = 0;
1659#endif
1660 }
1661 skb->dev = dev;
1662}
1663EXPORT_SYMBOL(skb_set_dev);
1664#endif /* CONFIG_NET_NS */
1665
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666/*
1667 * Invalidate hardware checksum when packet is to be mangled, and
1668 * complete checksum manually on outgoing path.
1669 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001670int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671{
Al Virod3bc23e2006-11-14 21:24:49 -08001672 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001673 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
Patrick McHardy84fa7932006-08-29 16:44:56 -07001675 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001676 goto out_set_summed;
1677
1678 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001679 /* Let GSO fix up the checksum. */
1680 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 }
1682
Herbert Xua0308472007-10-15 01:47:15 -07001683 offset = skb->csum_start - skb_headroom(skb);
1684 BUG_ON(offset >= skb_headlen(skb));
1685 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1686
1687 offset += skb->csum_offset;
1688 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1689
1690 if (skb_cloned(skb) &&
1691 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1693 if (ret)
1694 goto out;
1695 }
1696
Herbert Xua0308472007-10-15 01:47:15 -07001697 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001698out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001700out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 return ret;
1702}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001703EXPORT_SYMBOL(skb_checksum_help);
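/*
 * Illustrative sketch, not part of the original file: a transmit
 * path for hardware that can only checksum plain IPv4, falling back
 * to skb_checksum_help() for everything else.  The capability test
 * and function names are assumptions for the example.
 */
static netdev_tx_t example_csum_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    skb->protocol != htons(ETH_P_IP) &&
	    skb_checksum_help(skb)) {
		kfree_skb(skb);		/* could not complete the checksum */
		return NETDEV_TX_OK;
	}
	/* ... hand the now fully checksummed skb to the hardware ... */
	return NETDEV_TX_OK;
}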
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001705/**
1706 * skb_gso_segment - Perform segmentation on skb.
1707 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001708 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001709 *
1710 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001711 *
1712 * It may return NULL if the skb requires no segmentation. This is
1713 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001714 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001715struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001716{
1717 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1718 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001719 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001720 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001721
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001722 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001723 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001724 __skb_pull(skb, skb->mac_len);
1725
Herbert Xu67fd1a72009-01-19 16:26:44 -08001726 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1727 struct net_device *dev = skb->dev;
1728 struct ethtool_drvinfo info = {};
1729
1730 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1731 dev->ethtool_ops->get_drvinfo(dev, &info);
1732
1733 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1734 "ip_summed=%d",
1735 info.driver, dev ? dev->features : 0L,
1736 skb->sk ? skb->sk->sk_route_caps : 0L,
1737 skb->len, skb->data_len, skb->ip_summed);
1738
Herbert Xua430a432006-07-08 13:34:56 -07001739 if (skb_header_cloned(skb) &&
1740 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1741 return ERR_PTR(err);
1742 }
1743
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001744 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001745 list_for_each_entry_rcu(ptype,
1746 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001747 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001748 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001749 err = ptype->gso_send_check(skb);
1750 segs = ERR_PTR(err);
1751 if (err || skb_gso_ok(skb, features))
1752 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001753 __skb_push(skb, (skb->data -
1754 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001755 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001756 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001757 break;
1758 }
1759 }
1760 rcu_read_unlock();
1761
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001762 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001763
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001764 return segs;
1765}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001766EXPORT_SYMBOL(skb_gso_segment);
1767
Herbert Xufb286bb2005-11-10 13:01:24 -08001768/* Take action when hardware reception checksum errors are detected. */
1769#ifdef CONFIG_BUG
1770void netdev_rx_csum_fault(struct net_device *dev)
1771{
1772 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001773 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001774 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001775 dump_stack();
1776 }
1777}
1778EXPORT_SYMBOL(netdev_rx_csum_fault);
1779#endif
1780
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781/* Actually, we should eliminate this check as soon as we know that:
1782 * 1. An IOMMU is present and can map all the memory.
1783 * 2. No high memory really exists on this machine.
1784 */
1785
1786static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1787{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001788#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 int i;
1790
1791 if (dev->features & NETIF_F_HIGHDMA)
1792 return 0;
1793
1794 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1795 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1796 return 1;
1797
Herbert Xu3d3a8532006-06-27 13:33:10 -07001798#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 return 0;
1800}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001802struct dev_gso_cb {
1803 void (*destructor)(struct sk_buff *skb);
1804};
1805
1806#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1807
1808static void dev_gso_skb_destructor(struct sk_buff *skb)
1809{
1810 struct dev_gso_cb *cb;
1811
1812 do {
1813 struct sk_buff *nskb = skb->next;
1814
1815 skb->next = nskb->next;
1816 nskb->next = NULL;
1817 kfree_skb(nskb);
1818 } while (skb->next);
1819
1820 cb = DEV_GSO_CB(skb);
1821 if (cb->destructor)
1822 cb->destructor(skb);
1823}
1824
1825/**
1826 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1827 * @skb: buffer to segment
1828 *
1829 * This function segments the given skb and stores the list of segments
1830 * in skb->next.
1831 */
1832static int dev_gso_segment(struct sk_buff *skb)
1833{
1834 struct net_device *dev = skb->dev;
1835 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001836 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1837 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001838
Herbert Xu576a30e2006-06-27 13:22:38 -07001839 segs = skb_gso_segment(skb, features);
1840
1841 /* Verifying header integrity only. */
1842 if (!segs)
1843 return 0;
1844
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001845 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001846 return PTR_ERR(segs);
1847
1848 skb->next = segs;
1849 DEV_GSO_CB(skb)->destructor = skb->destructor;
1850 skb->destructor = dev_gso_skb_destructor;
1851
1852 return 0;
1853}
1854
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001855int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1856 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001857{
Stephen Hemminger00829822008-11-20 20:14:53 -08001858 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00001859 int rc = NETDEV_TX_OK;
Stephen Hemminger00829822008-11-20 20:14:53 -08001860
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001861 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001862 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001863 dev_queue_xmit_nit(skb, dev);
1864
Herbert Xu576a30e2006-06-27 13:22:38 -07001865 if (netif_needs_gso(dev, skb)) {
1866 if (unlikely(dev_gso_segment(skb)))
1867 goto out_kfree_skb;
1868 if (skb->next)
1869 goto gso;
1870 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001871
Eric Dumazet93f154b2009-05-18 22:19:19 -07001872 /*
1873 * If the device doesn't need skb->dst, release it right now while
1874 * it's hot in this cpu's cache
1875 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001876 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1877 skb_dst_drop(skb);
1878
Patrick Ohlyac45f602009-02-12 05:03:37 +00001879 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001880 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001881 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001882 /*
1883 * TODO: if skb_orphan() was called by
1884 * dev->hard_start_xmit() (for example, the unmodified
1885 * igb driver does that; bnx2 doesn't), then
1886 * skb_tx_software_timestamp() will be unable to send
1887 * back the time stamp.
1888 *
1889 * How can this be prevented? Always create another
1890 * reference to the socket before calling
1891 * dev->hard_start_xmit()? Prevent that skb_orphan()
1892 * does anything in dev->hard_start_xmit() by clearing
1893 * the skb destructor before the call and restoring it
1894 * afterwards, then doing the skb_orphan() ourselves?
1895 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001896 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001897 }
1898
Herbert Xu576a30e2006-06-27 13:22:38 -07001899gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001900 do {
1901 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001902
1903 skb->next = nskb->next;
1904 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00001905
1906 /*
1907 * If the device doesn't need nskb->dst, release it right now while
1908 * it's hot in this cpu's cache
1909 */
1910 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1911 skb_dst_drop(nskb);
1912
Stephen Hemminger00829822008-11-20 20:14:53 -08001913 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001914 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00001915 if (rc & ~NETDEV_TX_MASK)
1916 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07001917 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001918 skb->next = nskb;
1919 return rc;
1920 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001921 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001922 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001923 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001924 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001925
Patrick McHardy572a9d72009-11-10 06:14:14 +00001926out_kfree_gso_skb:
1927 if (likely(skb->next == NULL))
1928 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001929out_kfree_skb:
1930 kfree_skb(skb);
Patrick McHardy572a9d72009-11-10 06:14:14 +00001931 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001932}
1933
Tom Herbert0a9627f2010-03-16 08:03:29 +00001934static u32 hashrnd __read_mostly;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001935
Stephen Hemminger92477442009-03-21 13:39:26 -07001936u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001937{
David S. Miller70192982009-01-27 16:34:47 -08001938 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001939
David S. Miller513de112009-05-03 14:43:10 -07001940 if (skb_rx_queue_recorded(skb)) {
1941 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001942 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001943 hash -= dev->real_num_tx_queues;
1944 return hash;
1945 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001946
1947 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001948 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001949 else
David S. Miller70192982009-01-27 16:34:47 -08001950 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001951
Tom Herbert0a9627f2010-03-16 08:03:29 +00001952 hash = jhash_1word(hash, hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001953
David S. Millerb6b2fed2008-07-21 09:48:06 -07001954 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001955}
Stephen Hemminger92477442009-03-21 13:39:26 -07001956EXPORT_SYMBOL(skb_tx_hash);
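/*
 * Illustrative sketch, not part of the original file: a driver
 * ndo_select_queue hook that simply defers to skb_tx_hash().  Most
 * drivers can omit the hook entirely and let dev_pick_tx() below do
 * this; overriding it only makes sense under hardware constraints.
 */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);
}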
David S. Miller8f0f2222008-07-15 03:47:03 -07001957
Eric Dumazeted046422009-11-13 21:54:04 +00001958static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1959{
1960 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1961 if (net_ratelimit()) {
Tom Herbert0a9627f2010-03-16 08:03:29 +00001962 netdev_warn(dev, "selects TX queue %d, but "
Eric Dumazeted046422009-11-13 21:54:04 +00001963 "real number of TX queues is %d\n",
Tom Herbert0a9627f2010-03-16 08:03:29 +00001964 queue_index, dev->real_num_tx_queues);
Eric Dumazeted046422009-11-13 21:54:04 +00001965 }
1966 return 0;
1967 }
1968 return queue_index;
1969}
1970
David S. Millere8a04642008-07-17 00:34:19 -07001971static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1972 struct sk_buff *skb)
1973{
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001974 u16 queue_index;
1975 struct sock *sk = skb->sk;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001976
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001977 if (sk_tx_queue_recorded(sk)) {
1978 queue_index = sk_tx_queue_get(sk);
1979 } else {
1980 const struct net_device_ops *ops = dev->netdev_ops;
1981
1982 if (ops->ndo_select_queue) {
1983 queue_index = ops->ndo_select_queue(dev, skb);
Eric Dumazeted046422009-11-13 21:54:04 +00001984 queue_index = dev_cap_txqueue(dev, queue_index);
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001985 } else {
1986 queue_index = 0;
1987 if (dev->real_num_tx_queues > 1)
1988 queue_index = skb_tx_hash(dev, skb);
1989
1990 if (sk && sk->sk_dst_cache)
1991 sk_tx_queue_set(sk, queue_index);
1992 }
1993 }
David S. Millereae792b2008-07-15 03:03:33 -07001994
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001995 skb_set_queue_mapping(skb, queue_index);
1996 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001997}
1998
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001999static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2000 struct net_device *dev,
2001 struct netdev_queue *txq)
2002{
2003 spinlock_t *root_lock = qdisc_lock(q);
2004 int rc;
2005
2006 spin_lock(root_lock);
2007 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2008 kfree_skb(skb);
2009 rc = NET_XMIT_DROP;
2010 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2011 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
2012 /*
2013 * This is a work-conserving queue; there are no old skbs
2014 * waiting to be sent out; and the qdisc is not running -
2015 * xmit the skb directly.
2016 */
2017 __qdisc_update_bstats(q, skb->len);
2018 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
2019 __qdisc_run(q);
2020 else
2021 clear_bit(__QDISC_STATE_RUNNING, &q->state);
2022
2023 rc = NET_XMIT_SUCCESS;
2024 } else {
2025 rc = qdisc_enqueue_root(skb, q);
2026 qdisc_run(q);
2027 }
2028 spin_unlock(root_lock);
2029
2030 return rc;
2031}
2032
Krishna Kumar4b258462010-01-21 01:26:29 -08002033/*
2034 * Returns true if either:
2035 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2036 * 2. skb is fragmented and the device does not support SG, or if
2037 * at least one of the fragments is in highmem and the device does not
2038 * support DMA from it.
2039 */
2040static inline int skb_needs_linearize(struct sk_buff *skb,
2041 struct net_device *dev)
2042{
2043 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2044 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2045 illegal_highdma(dev, skb)));
2046}
2047
Dave Jonesd29f7492008-07-22 14:09:06 -07002048/**
2049 * dev_queue_xmit - transmit a buffer
2050 * @skb: buffer to transmit
2051 *
2052 * Queue a buffer for transmission to a network device. The caller must
2053 * have set the device and priority and built the buffer before calling
2054 * this function. The function can be called from an interrupt.
2055 *
2056 * A negative errno code is returned on a failure. A success does not
2057 * guarantee the frame will be transmitted as it may be dropped due
2058 * to congestion or traffic shaping.
2059 *
2060 * -----------------------------------------------------------------------------------
2061 * I notice this method can also return errors from the queue disciplines,
2062 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2063 * be positive.
2064 *
2065 * Regardless of the return value, the skb is consumed, so it is currently
2066 * difficult to retry a send to this method. (You can bump the ref count
2067 * before sending to hold a reference for retry if you are careful.)
2068 *
2069 * When calling this method, interrupts MUST be enabled. This is because
2070 * the BH enable code must have IRQs enabled so that it will not deadlock.
2071 * --BLG
2072 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073int dev_queue_xmit(struct sk_buff *skb)
2074{
2075 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002076 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 struct Qdisc *q;
2078 int rc = -ENOMEM;
2079
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002080 /* GSO will handle the following emulations directly. */
2081 if (netif_needs_gso(dev, skb))
2082 goto gso;
2083
Krishna Kumar4b258462010-01-21 01:26:29 -08002084 /* Convert a paged skb to linear, if required */
2085 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 goto out_kfree_skb;
2087
2088 /* If packet is not checksummed and device does not support
2089 * checksumming for this protocol, complete checksumming here.
2090 */
Herbert Xu663ead32007-04-09 11:59:07 -07002091 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2092 skb_set_transport_header(skb, skb->csum_start -
2093 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07002094 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
2095 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07002096 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002098gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002099 /* Disable soft irqs for various locks below. Also
2100 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002102 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
David S. Millereae792b2008-07-15 03:03:33 -07002104 txq = dev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002105 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002108 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109#endif
2110 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002111 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002112 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 }
2114
2115 /* The device has no queue. Common case for software devices:
2116 loopback, all sorts of tunnels...
2117
Herbert Xu932ff272006-06-09 12:20:56 -07002118 Really, it is unlikely that netif_tx_lock protection is necessary
2119 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 counters.)
2121 However, it is possible that they rely on the protection
2122 we provide here.
2123
2124 Check this and shoot the lock. It is not prone to deadlocks.
2125 Or shoot the noqueue qdisc, which is even simpler 8)
2126 */
2127 if (dev->flags & IFF_UP) {
2128 int cpu = smp_processor_id(); /* ok because BHs are off */
2129
David S. Millerc773e842008-07-08 23:13:53 -07002130 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
David S. Millerc773e842008-07-08 23:13:53 -07002132 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002134 if (!netif_tx_queue_stopped(txq)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002135 rc = dev_hard_start_xmit(skb, dev, txq);
2136 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002137 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 goto out;
2139 }
2140 }
David S. Millerc773e842008-07-08 23:13:53 -07002141 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 if (net_ratelimit())
2143 printk(KERN_CRIT "Virtual device %s asks to "
2144 "queue packet!\n", dev->name);
2145 } else {
2146 /* Recursion is detected! It is possible,
2147 * unfortunately */
2148 if (net_ratelimit())
2149 printk(KERN_CRIT "Dead loop on virtual device "
2150 "%s, fix it urgently!\n", dev->name);
2151 }
2152 }
2153
2154 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002155 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156
2157out_kfree_skb:
2158 kfree_skb(skb);
2159 return rc;
2160out:
Herbert Xud4828d82006-06-22 02:28:18 -07002161 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 return rc;
2163}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002164EXPORT_SYMBOL(dev_queue_xmit);
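/*
 * Illustrative sketch, not part of the original file: hand-building
 * a frame and submitting it with dev_queue_xmit(), which consumes
 * the skb whether or not it is ultimately transmitted.  The payload
 * handling and the ETH_P_IP choice are assumptions for the example.
 */
static int example_send(struct net_device *dev, const void *payload,
			unsigned int len, const unsigned char *dest_mac)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), payload, len);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);
	skb->priority = 0;
	if (dev_hard_header(skb, dev, ETH_P_IP, dest_mac, NULL, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}
	return dev_queue_xmit(skb);
}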
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165
2166
2167/*=======================================================================
2168 Receiver routines
2169 =======================================================================*/
2170
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002171int netdev_max_backlog __read_mostly = 1000;
2172int netdev_budget __read_mostly = 300;
2173int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
2175DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2176
Tom Herbert1e94d722010-03-18 17:45:44 -07002177#ifdef CONFIG_SMP
Tom Herbert0a9627f2010-03-16 08:03:29 +00002178/*
2179 * get_rps_cpu is called from netif_receive_skb and returns the target
2180 * CPU from the RPS map of the receiving queue for a given skb.
2181 */
2182static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
2183{
2184 struct ipv6hdr *ip6;
2185 struct iphdr *ip;
2186 struct netdev_rx_queue *rxqueue;
2187 struct rps_map *map;
2188 int cpu = -1;
2189 u8 ip_proto;
2190 u32 addr1, addr2, ports, ihl;
2191
2192 rcu_read_lock();
2193
2194 if (skb_rx_queue_recorded(skb)) {
2195 u16 index = skb_get_rx_queue(skb);
2196 if (unlikely(index >= dev->num_rx_queues)) {
2197 if (net_ratelimit()) {
2198 netdev_warn(dev, "received packet on queue "
2199 "%u, but number of RX queues is %u\n",
2200 index, dev->num_rx_queues);
2201 }
2202 goto done;
2203 }
2204 rxqueue = dev->_rx + index;
2205 } else
2206 rxqueue = dev->_rx;
2207
2208 if (!rxqueue->rps_map)
2209 goto done;
2210
2211 if (skb->rxhash)
2212 goto got_hash; /* Skip hash computation on packet header */
2213
2214 switch (skb->protocol) {
2215 case __constant_htons(ETH_P_IP):
2216 if (!pskb_may_pull(skb, sizeof(*ip)))
2217 goto done;
2218
2219 ip = (struct iphdr *) skb->data;
2220 ip_proto = ip->protocol;
2221 addr1 = ip->saddr;
2222 addr2 = ip->daddr;
2223 ihl = ip->ihl;
2224 break;
2225 case __constant_htons(ETH_P_IPV6):
2226 if (!pskb_may_pull(skb, sizeof(*ip6)))
2227 goto done;
2228
2229 ip6 = (struct ipv6hdr *) skb->data;
2230 ip_proto = ip6->nexthdr;
2231 addr1 = ip6->saddr.s6_addr32[3];
2232 addr2 = ip6->daddr.s6_addr32[3];
2233 ihl = (40 >> 2);
2234 break;
2235 default:
2236 goto done;
2237 }
2238 ports = 0;
2239 switch (ip_proto) {
2240 case IPPROTO_TCP:
2241 case IPPROTO_UDP:
2242 case IPPROTO_DCCP:
2243 case IPPROTO_ESP:
2244 case IPPROTO_AH:
2245 case IPPROTO_SCTP:
2246 case IPPROTO_UDPLITE:
2247 if (pskb_may_pull(skb, (ihl * 4) + 4))
2248 ports = *((u32 *) (skb->data + (ihl * 4)));
2249 break;
2250
2251 default:
2252 break;
2253 }
2254
2255 skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd);
2256 if (!skb->rxhash)
2257 skb->rxhash = 1;
2258
2259got_hash:
2260 map = rcu_dereference(rxqueue->rps_map);
2261 if (map) {
2262 u16 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2263
2264 if (cpu_online(tcpu)) {
2265 cpu = tcpu;
2266 goto done;
2267 }
2268 }
2269
2270done:
2271 rcu_read_unlock();
2272 return cpu;
2273}
2274
2275/*
2276 * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled
2277 * to be sent to kick remote softirq processing. There are two masks since
2278 * the sending of IPIs must be done with interrupts enabled. The select field
2279 * indicates the current mask that enqueue_backlog uses to schedule IPIs.
2280 * select is flipped before net_rps_action is called while still under lock,
2281 * net_rps_action then uses the non-selected mask to send the IPIs and clears
2282 * it without conflicting with enqueue_backlog operation.
2283 */
2284struct rps_remote_softirq_cpus {
2285 cpumask_t mask[2];
2286 int select;
2287};
2288static DEFINE_PER_CPU(struct rps_remote_softirq_cpus, rps_remote_softirq_cpus);
2289
2290/* Called from hardirq (IPI) context */
2291static void trigger_softirq(void *data)
2292{
2293 struct softnet_data *queue = data;
2294 __napi_schedule(&queue->backlog);
2295 __get_cpu_var(netdev_rx_stat).received_rps++;
2296}
Tom Herbert1e94d722010-03-18 17:45:44 -07002297#endif /* CONFIG_SMP */
Tom Herbert0a9627f2010-03-16 08:03:29 +00002298
2299/*
2300 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2301 * queue (may be a remote CPU queue).
2302 */
2303static int enqueue_to_backlog(struct sk_buff *skb, int cpu)
2304{
2305 struct softnet_data *queue;
2306 unsigned long flags;
2307
2308 queue = &per_cpu(softnet_data, cpu);
2309
2310 local_irq_save(flags);
2311 __get_cpu_var(netdev_rx_stat).total++;
2312
2313 spin_lock(&queue->input_pkt_queue.lock);
2314 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2315 if (queue->input_pkt_queue.qlen) {
2316enqueue:
2317 __skb_queue_tail(&queue->input_pkt_queue, skb);
2318 spin_unlock_irqrestore(&queue->input_pkt_queue.lock,
2319 flags);
2320 return NET_RX_SUCCESS;
2321 }
2322
2323 /* Schedule NAPI for backlog device */
2324 if (napi_schedule_prep(&queue->backlog)) {
Tom Herbert1e94d722010-03-18 17:45:44 -07002325#ifdef CONFIG_SMP
Tom Herbert0a9627f2010-03-16 08:03:29 +00002326 if (cpu != smp_processor_id()) {
2327 struct rps_remote_softirq_cpus *rcpus =
2328 &__get_cpu_var(rps_remote_softirq_cpus);
2329
2330 cpu_set(cpu, rcpus->mask[rcpus->select]);
2331 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2332 } else
2333 __napi_schedule(&queue->backlog);
Tom Herbert1e94d722010-03-18 17:45:44 -07002334#else
2335 __napi_schedule(&queue->backlog);
2336#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00002337 }
2338 goto enqueue;
2339 }
2340
2341 spin_unlock(&queue->input_pkt_queue.lock);
2342
2343 __get_cpu_var(netdev_rx_stat).dropped++;
2344 local_irq_restore(flags);
2345
2346 kfree_skb(skb);
2347 return NET_RX_DROP;
2348}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350/**
2351 * netif_rx - post buffer to the network code
2352 * @skb: buffer to post
2353 *
2354 * This function receives a packet from a device driver and queues it for
2355 * the upper (protocol) levels to process. It always succeeds. The buffer
2356 * may be dropped during processing for congestion control or by the
2357 * protocol layers.
2358 *
2359 * return values:
2360 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 * NET_RX_DROP (packet was dropped)
2362 *
2363 */
2364
2365int netif_rx(struct sk_buff *skb)
2366{
Tom Herbert0a9627f2010-03-16 08:03:29 +00002367 int cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
2369 /* if netpoll wants it, pretend we never saw it */
2370 if (netpoll_rx(skb))
2371 return NET_RX_DROP;
2372
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002373 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002374 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375
Tom Herbert1e94d722010-03-18 17:45:44 -07002376#ifdef CONFIG_SMP
Tom Herbert0a9627f2010-03-16 08:03:29 +00002377 cpu = get_rps_cpu(skb->dev, skb);
2378 if (cpu < 0)
2379 cpu = smp_processor_id();
Tom Herbert1e94d722010-03-18 17:45:44 -07002380#else
2381 cpu = smp_processor_id();
2382#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383
Tom Herbert0a9627f2010-03-16 08:03:29 +00002384 return enqueue_to_backlog(skb, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002386EXPORT_SYMBOL(netif_rx);
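/*
 * Illustrative sketch, not part of the original file: the classic
 * non-NAPI receive path of an interrupt handler copying a frame out
 * of the hardware and posting it with netif_rx().  The data source
 * is an assumption for the example.
 */
static void example_rx(struct net_device *dev, const u8 *data,
		       unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* queue for softirq processing */
	dev->stats.rx_packets++;
}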
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387
2388int netif_rx_ni(struct sk_buff *skb)
2389{
2390 int err;
2391
2392 preempt_disable();
2393 err = netif_rx(skb);
2394 if (local_softirq_pending())
2395 do_softirq();
2396 preempt_enable();
2397
2398 return err;
2399}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400EXPORT_SYMBOL(netif_rx_ni);
2401
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402static void net_tx_action(struct softirq_action *h)
2403{
2404 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2405
2406 if (sd->completion_queue) {
2407 struct sk_buff *clist;
2408
2409 local_irq_disable();
2410 clist = sd->completion_queue;
2411 sd->completion_queue = NULL;
2412 local_irq_enable();
2413
2414 while (clist) {
2415 struct sk_buff *skb = clist;
2416 clist = clist->next;
2417
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002418 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 __kfree_skb(skb);
2420 }
2421 }
2422
2423 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002424 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425
2426 local_irq_disable();
2427 head = sd->output_queue;
2428 sd->output_queue = NULL;
2429 local_irq_enable();
2430
2431 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002432 struct Qdisc *q = head;
2433 spinlock_t *root_lock;
2434
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 head = head->next_sched;
2436
David S. Miller5fb66222008-08-02 20:02:43 -07002437 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002438 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002439 smp_mb__before_clear_bit();
2440 clear_bit(__QDISC_STATE_SCHED,
2441 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002442 qdisc_run(q);
2443 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002445 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002446 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002447 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002448 } else {
2449 smp_mb__before_clear_bit();
2450 clear_bit(__QDISC_STATE_SCHED,
2451 &q->state);
2452 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 }
2454 }
2455 }
2456}
2457
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002458static inline int deliver_skb(struct sk_buff *skb,
2459 struct packet_type *pt_prev,
2460 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461{
2462 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002463 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464}
2465
2466#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002467
2468#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2469/* This hook is defined here for ATM LANE */
2470int (*br_fdb_test_addr_hook)(struct net_device *dev,
2471 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002472EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002473#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474
Stephen Hemminger6229e362007-03-21 13:38:47 -07002475/*
2476 * If the bridge module is loaded, call the bridging hook.
2477 * Returns NULL if the packet was consumed.
2478 */
2479struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2480 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002481EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002482
Stephen Hemminger6229e362007-03-21 13:38:47 -07002483static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2484 struct packet_type **pt_prev, int *ret,
2485 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486{
2487 struct net_bridge_port *port;
2488
Stephen Hemminger6229e362007-03-21 13:38:47 -07002489 if (skb->pkt_type == PACKET_LOOPBACK ||
2490 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2491 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492
2493 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002494 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002496 }
2497
Stephen Hemminger6229e362007-03-21 13:38:47 -07002498 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499}
2500#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002501#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502#endif
2503
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002504#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2505struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2506EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2507
2508static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2509 struct packet_type **pt_prev,
2510 int *ret,
2511 struct net_device *orig_dev)
2512{
2513 if (skb->dev->macvlan_port == NULL)
2514 return skb;
2515
2516 if (*pt_prev) {
2517 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2518 *pt_prev = NULL;
2519 }
2520 return macvlan_handle_frame_hook(skb);
2521}
2522#else
2523#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2524#endif
2525
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526#ifdef CONFIG_NET_CLS_ACT
2527/* TODO: Maybe we should just force sch_ingress to be compiled in
2528 * when CONFIG_NET_CLS_ACT is? Otherwise we execute some useless instructions:
2529 * a compare and 2 extra stores right now if we don't have it on
2530 * but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002531 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 * the ingress scheduler, you just can't add policies on ingress.
2533 *
2534 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002535static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002538 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002539 struct netdev_queue *rxq;
2540 int result = TC_ACT_OK;
2541 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002542
Herbert Xuf697c3e2007-10-14 00:38:47 -07002543 if (MAX_RED_LOOP < ttl++) {
2544 printk(KERN_WARNING
2545 "Redir loop detected Dropping packet (%d->%d)\n",
Eric Dumazet8964be42009-11-20 15:35:04 -08002546 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07002547 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 }
2549
Herbert Xuf697c3e2007-10-14 00:38:47 -07002550 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2551 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2552
David S. Miller555353c2008-07-08 17:33:13 -07002553 rxq = &dev->rx_queue;
2554
David S. Miller83874002008-07-17 00:53:03 -07002555 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002556 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002557 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002558 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2559 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002560 spin_unlock(qdisc_lock(q));
2561 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002562
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563 return result;
2564}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002565
2566static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2567 struct packet_type **pt_prev,
2568 int *ret, struct net_device *orig_dev)
2569{
David S. Miller8d50b532008-07-30 02:37:46 -07002570 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002571 goto out;
2572
2573 if (*pt_prev) {
2574 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2575 *pt_prev = NULL;
2576 } else {
2577 /* Huh? Why does turning on AF_PACKET affect this? */
2578 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2579 }
2580
2581 switch (ing_filter(skb)) {
2582 case TC_ACT_SHOT:
2583 case TC_ACT_STOLEN:
2584 kfree_skb(skb);
2585 return NULL;
2586 }
2587
2588out:
2589 skb->tc_verd = 0;
2590 return skb;
2591}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592#endif
2593
Patrick McHardybc1d0412008-07-14 22:49:30 -07002594/*
2595 * netif_nit_deliver - deliver received packets to network taps
2596 * @skb: buffer
2597 *
2598 * This function is used to deliver incoming packets to network
2599 * taps. It should be used when the normal netif_receive_skb path
2600 * is bypassed, for example because of VLAN acceleration.
2601 */
2602void netif_nit_deliver(struct sk_buff *skb)
2603{
2604 struct packet_type *ptype;
2605
2606 if (list_empty(&ptype_all))
2607 return;
2608
2609 skb_reset_network_header(skb);
2610 skb_reset_transport_header(skb);
2611 skb->mac_len = skb->network_header - skb->mac_header;
2612
2613 rcu_read_lock();
2614 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2615 if (!ptype->dev || ptype->dev == skb->dev)
2616 deliver_skb(skb, ptype, skb->dev);
2617 }
2618 rcu_read_unlock();
2619}
2620
Tom Herbert0a9627f2010-03-16 08:03:29 +00002621int __netif_receive_skb(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622{
2623 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002624 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002625 struct net_device *null_or_orig;
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002626 struct net_device *null_or_bond;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002628 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002630 if (!skb->tstamp.tv64)
2631 net_timestamp(skb);
2632
Eric Dumazet05423b22009-10-26 18:40:35 -07002633 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002634 return NET_RX_SUCCESS;
2635
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002637 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 return NET_RX_DROP;
2639
Eric Dumazet8964be42009-11-20 15:35:04 -08002640 if (!skb->skb_iif)
2641 skb->skb_iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002642
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002643 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002644 orig_dev = skb->dev;
2645 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002646 if (skb_bond_should_drop(skb))
2647 null_or_orig = orig_dev; /* deliver only exact match */
2648 else
2649 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002650 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002651
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652 __get_cpu_var(netdev_rx_stat).total++;
2653
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002654 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002655 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002656 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657
2658 pt_prev = NULL;
2659
2660 rcu_read_lock();
2661
2662#ifdef CONFIG_NET_CLS_ACT
2663 if (skb->tc_verd & TC_NCLS) {
2664 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2665 goto ncls;
2666 }
2667#endif
2668
2669 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002670 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2671 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002672 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002673 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 pt_prev = ptype;
2675 }
2676 }
2677
2678#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002679 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2680 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682ncls:
2683#endif
2684
Stephen Hemminger6229e362007-03-21 13:38:47 -07002685 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2686 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002688 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2689 if (!skb)
2690 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002692 /*
2693 * Make sure frames received on VLAN interfaces stacked on
2694 * bonding interfaces still make their way to any base bonding
2695 * device that may have registered for a specific ptype. The
2696 * handler may have to adjust skb->dev and orig_dev.
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002697 */
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002698 null_or_bond = NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002699 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2700 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002701 null_or_bond = vlan_dev_real_dev(skb->dev);
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002702 }
2703
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002705 list_for_each_entry_rcu(ptype,
2706 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002707 if (ptype->type == type && (ptype->dev == null_or_orig ||
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002708 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2709 ptype->dev == null_or_bond)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002710 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002711 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 pt_prev = ptype;
2713 }
2714 }
2715
2716 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002717 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 } else {
2719 kfree_skb(skb);
2720		/* Jamal, now you will not be able to escape explaining
2721		 * to me how you were going to use this. :-)
2722 */
2723 ret = NET_RX_DROP;
2724 }
2725
2726out:
2727 rcu_read_unlock();
2728 return ret;
2729}
Tom Herbert0a9627f2010-03-16 08:03:29 +00002730
2731/**
2732 * netif_receive_skb - process receive buffer from network
2733 * @skb: buffer to process
2734 *
2735 * netif_receive_skb() is the main receive data processing function.
2736 * It always succeeds. The buffer may be dropped during processing
2737 * for congestion control or by the protocol layers.
2738 *
2739 * This function may only be called from softirq context and interrupts
2740 * should be enabled.
2741 *
2742 * Return values (usually ignored):
2743 * NET_RX_SUCCESS: no congestion
2744 * NET_RX_DROP: packet was dropped
2745 */
2746int netif_receive_skb(struct sk_buff *skb)
2747{
Tom Herbert1e94d722010-03-18 17:45:44 -07002748#ifdef CONFIG_SMP
Tom Herbert0a9627f2010-03-16 08:03:29 +00002749 int cpu;
2750
2751 cpu = get_rps_cpu(skb->dev, skb);
2752
2753 if (cpu < 0)
2754 return __netif_receive_skb(skb);
2755 else
2756 return enqueue_to_backlog(skb, cpu);
Tom Herbert1e94d722010-03-18 17:45:44 -07002757#else
2758 return __netif_receive_skb(skb);
2759#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00002760}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002761EXPORT_SYMBOL(netif_receive_skb);
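/*
 * Illustrative sketch (editor's addition, not part of dev.c): a driver's
 * NAPI poll routine hands completed frames to netif_receive_skb() from
 * softirq context. my_hw_rx_done() and my_hw_fetch_skb() are hypothetical
 * device helpers; napi_complete() handling is omitted here and sketched
 * next to napi_complete() further below.
 */
#if 0
static int my_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	int work = 0;

	while (work < budget && my_hw_rx_done(dev)) {
		struct sk_buff *skb = my_hw_fetch_skb(dev);	/* hypothetical */

		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
		work++;
	}
	return work;
}
#endif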
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002763/* Network device is going away, flush any packets still pending */
2764static void flush_backlog(void *arg)
2765{
2766 struct net_device *dev = arg;
2767 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2768 struct sk_buff *skb, *tmp;
2769
2770 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2771 if (skb->dev == dev) {
2772 __skb_unlink(skb, &queue->input_pkt_queue);
2773 kfree_skb(skb);
2774 }
2775}
2776
Herbert Xud565b0a2008-12-15 23:38:52 -08002777static int napi_gro_complete(struct sk_buff *skb)
2778{
2779 struct packet_type *ptype;
2780 __be16 type = skb->protocol;
2781 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2782 int err = -ENOENT;
2783
Herbert Xufc59f9a2009-04-14 15:11:06 -07002784 if (NAPI_GRO_CB(skb)->count == 1) {
2785 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002786 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002787 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002788
2789 rcu_read_lock();
2790 list_for_each_entry_rcu(ptype, head, list) {
2791 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2792 continue;
2793
2794 err = ptype->gro_complete(skb);
2795 break;
2796 }
2797 rcu_read_unlock();
2798
2799 if (err) {
2800 WARN_ON(&ptype->list == head);
2801 kfree_skb(skb);
2802 return NET_RX_SUCCESS;
2803 }
2804
2805out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002806 return netif_receive_skb(skb);
2807}
2808
David S. Miller11380a42010-01-19 13:46:10 -08002809static void napi_gro_flush(struct napi_struct *napi)
Herbert Xud565b0a2008-12-15 23:38:52 -08002810{
2811 struct sk_buff *skb, *next;
2812
2813 for (skb = napi->gro_list; skb; skb = next) {
2814 next = skb->next;
2815 skb->next = NULL;
2816 napi_gro_complete(skb);
2817 }
2818
Herbert Xu4ae55442009-02-08 18:00:36 +00002819 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002820 napi->gro_list = NULL;
2821}
Herbert Xud565b0a2008-12-15 23:38:52 -08002822
Ben Hutchings5b252f02009-10-29 07:17:09 +00002823enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002824{
2825 struct sk_buff **pp = NULL;
2826 struct packet_type *ptype;
2827 __be16 type = skb->protocol;
2828 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002829 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002830 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002831 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002832
2833 if (!(skb->dev->features & NETIF_F_GRO))
2834 goto normal;
2835
David S. Miller4cf704f2009-06-09 00:18:51 -07002836 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002837 goto normal;
2838
Herbert Xud565b0a2008-12-15 23:38:52 -08002839 rcu_read_lock();
2840 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002841 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2842 continue;
2843
Herbert Xu86911732009-01-29 14:19:50 +00002844 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002845 mac_len = skb->network_header - skb->mac_header;
2846 skb->mac_len = mac_len;
2847 NAPI_GRO_CB(skb)->same_flow = 0;
2848 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002849 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002850
Herbert Xud565b0a2008-12-15 23:38:52 -08002851 pp = ptype->gro_receive(&napi->gro_list, skb);
2852 break;
2853 }
2854 rcu_read_unlock();
2855
2856 if (&ptype->list == head)
2857 goto normal;
2858
Herbert Xu0da2afd52008-12-26 14:57:42 -08002859 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002860 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002861
Herbert Xud565b0a2008-12-15 23:38:52 -08002862 if (pp) {
2863 struct sk_buff *nskb = *pp;
2864
2865 *pp = nskb->next;
2866 nskb->next = NULL;
2867 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002868 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002869 }
2870
Herbert Xu0da2afd52008-12-26 14:57:42 -08002871 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002872 goto ok;
2873
Herbert Xu4ae55442009-02-08 18:00:36 +00002874 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002875 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002876
Herbert Xu4ae55442009-02-08 18:00:36 +00002877 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002878 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002879 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002880 skb->next = napi->gro_list;
2881 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002882 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002883
Herbert Xuad0f9902009-02-01 01:24:55 -08002884pull:
Herbert Xucb189782009-05-26 18:50:31 +00002885 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2886 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2887
2888 BUG_ON(skb->end - skb->tail < grow);
2889
2890 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2891
2892 skb->tail += grow;
2893 skb->data_len -= grow;
2894
2895 skb_shinfo(skb)->frags[0].page_offset += grow;
2896 skb_shinfo(skb)->frags[0].size -= grow;
2897
2898 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2899 put_page(skb_shinfo(skb)->frags[0].page);
2900 memmove(skb_shinfo(skb)->frags,
2901 skb_shinfo(skb)->frags + 1,
2902				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2903 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002904 }
2905
Herbert Xud565b0a2008-12-15 23:38:52 -08002906ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002907 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002908
2909normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002910 ret = GRO_NORMAL;
2911 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002912}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002913EXPORT_SYMBOL(dev_gro_receive);
2914
Ben Hutchings5b252f02009-10-29 07:17:09 +00002915static gro_result_t
2916__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002917{
2918 struct sk_buff *p;
2919
Herbert Xud1c76af2009-03-16 10:50:02 -07002920 if (netpoll_rx_on(skb))
2921 return GRO_NORMAL;
2922
Herbert Xu96e93ea2009-01-06 10:49:34 -08002923 for (p = napi->gro_list; p; p = p->next) {
Joe Perchesf64f9e72009-11-29 16:55:45 -08002924 NAPI_GRO_CB(p)->same_flow =
2925 (p->dev == skb->dev) &&
2926 !compare_ether_header(skb_mac_header(p),
2927 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002928 NAPI_GRO_CB(p)->flush = 0;
2929 }
2930
2931 return dev_gro_receive(napi, skb);
2932}
Herbert Xu5d38a072009-01-04 16:13:40 -08002933
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002934gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002935{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002936 switch (ret) {
2937 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002938 if (netif_receive_skb(skb))
2939 ret = GRO_DROP;
2940 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002941
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002942 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002943 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002944 kfree_skb(skb);
2945 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002946
2947 case GRO_HELD:
2948 case GRO_MERGED:
2949 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002950 }
2951
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002952 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002953}
2954EXPORT_SYMBOL(napi_skb_finish);
2955
Herbert Xu78a478d2009-05-26 18:50:21 +00002956void skb_gro_reset_offset(struct sk_buff *skb)
2957{
2958 NAPI_GRO_CB(skb)->data_offset = 0;
2959 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002960 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002961
Herbert Xu78d3fd02009-05-26 18:50:23 +00002962 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002963 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002964 NAPI_GRO_CB(skb)->frag0 =
2965 page_address(skb_shinfo(skb)->frags[0].page) +
2966 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002967 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2968 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002969}
2970EXPORT_SYMBOL(skb_gro_reset_offset);
2971
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002972gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002973{
Herbert Xu86911732009-01-29 14:19:50 +00002974 skb_gro_reset_offset(skb);
2975
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002976 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002977}
2978EXPORT_SYMBOL(napi_gro_receive);
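/*
 * Illustrative sketch (editor's addition, not part of dev.c): a GRO-aware
 * driver delivers through napi_gro_receive() instead of netif_receive_skb(),
 * so dev_gro_receive() can merge same-flow segments before protocol
 * delivery. my_hw_fetch_skb() is hypothetical; merging works best when the
 * device has already validated the checksum.
 */
#if 0
static void my_gro_rx_one(struct napi_struct *napi)
{
	struct sk_buff *skb = my_hw_fetch_skb(napi->dev);	/* hypothetical */

	skb->protocol = eth_type_trans(skb, napi->dev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	napi_gro_receive(napi, skb);
}
#endif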
2979
Herbert Xu96e93ea2009-01-06 10:49:34 -08002980void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2981{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002982 __skb_pull(skb, skb_headlen(skb));
2983 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2984
2985 napi->skb = skb;
2986}
2987EXPORT_SYMBOL(napi_reuse_skb);
2988
Herbert Xu76620aa2009-04-16 02:02:07 -07002989struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002990{
Herbert Xu5d38a072009-01-04 16:13:40 -08002991 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002992
2993 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00002994 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2995 if (skb)
2996 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002997 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08002998 return skb;
2999}
Herbert Xu76620aa2009-04-16 02:02:07 -07003000EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003001
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003002gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3003 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003004{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003005 switch (ret) {
3006 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00003007 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00003008 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00003009
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003010 if (ret == GRO_HELD)
3011 skb_gro_pull(skb, -ETH_HLEN);
3012 else if (netif_receive_skb(skb))
3013 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00003014 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003015
3016 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003017 case GRO_MERGED_FREE:
3018 napi_reuse_skb(napi, skb);
3019 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00003020
3021 case GRO_MERGED:
3022 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003023 }
3024
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003025 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003026}
3027EXPORT_SYMBOL(napi_frags_finish);
3028
Herbert Xu76620aa2009-04-16 02:02:07 -07003029struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08003030{
Herbert Xu76620aa2009-04-16 02:02:07 -07003031 struct sk_buff *skb = napi->skb;
3032 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00003033 unsigned int hlen;
3034 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07003035
3036 napi->skb = NULL;
3037
3038 skb_reset_mac_header(skb);
3039 skb_gro_reset_offset(skb);
3040
Herbert Xua5b1cf22009-05-26 18:50:28 +00003041 off = skb_gro_offset(skb);
3042 hlen = off + sizeof(*eth);
3043 eth = skb_gro_header_fast(skb, off);
3044 if (skb_gro_header_hard(skb, hlen)) {
3045 eth = skb_gro_header_slow(skb, hlen, off);
3046 if (unlikely(!eth)) {
3047 napi_reuse_skb(napi, skb);
3048 skb = NULL;
3049 goto out;
3050 }
Herbert Xu76620aa2009-04-16 02:02:07 -07003051 }
3052
3053 skb_gro_pull(skb, sizeof(*eth));
3054
3055 /*
3056 * This works because the only protocols we care about don't require
3057 * special handling. We'll fix it up properly at the end.
3058 */
3059 skb->protocol = eth->h_proto;
3060
3061out:
3062 return skb;
3063}
3064EXPORT_SYMBOL(napi_frags_skb);
3065
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003066gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07003067{
3068 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08003069
3070 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07003071 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08003072
Herbert Xu5d0d9be2009-01-29 14:19:48 +00003073 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08003074}
3075EXPORT_SYMBOL(napi_gro_frags);
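/*
 * Illustrative sketch (editor's addition, not part of dev.c): drivers that
 * receive directly into pages use the napi_get_frags()/napi_gro_frags()
 * pair. The stack-owned skb carries only page fragments; napi_frags_skb()
 * above then parses the Ethernet header out of frag0. my_hw_rx_page() and
 * its outputs are hypothetical.
 */
#if 0
static void my_frags_rx_one(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_get_frags(napi);
	struct page *page;
	unsigned int len;

	if (!skb)
		return;		/* allocation failed; frame is lost */

	my_hw_rx_page(napi->dev, &page, &len);	/* hypothetical */
	skb_fill_page_desc(skb, 0, page, 0, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	napi_gro_frags(napi);
}
#endif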
3076
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003077static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078{
3079 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080 struct softnet_data *queue = &__get_cpu_var(softnet_data);
3081 unsigned long start_time = jiffies;
3082
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003083 napi->weight = weight_p;
3084 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003085 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086
Tom Herbert0a9627f2010-03-16 08:03:29 +00003087 spin_lock_irq(&queue->input_pkt_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003089 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07003090 __napi_complete(napi);
Tom Herbert0a9627f2010-03-16 08:03:29 +00003091 spin_unlock_irq(&queue->input_pkt_queue.lock);
Herbert Xu8f1ead22009-03-26 00:59:10 -07003092 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003093 }
Tom Herbert0a9627f2010-03-16 08:03:29 +00003094 spin_unlock_irq(&queue->input_pkt_queue.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095
Tom Herbert0a9627f2010-03-16 08:03:29 +00003096 __netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003097 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003099 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100}
3101
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003102/**
3103 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003104 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003105 *
3106 * The entry's receive function will be scheduled to run
3107 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08003108void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003109{
3110 unsigned long flags;
3111
3112 local_irq_save(flags);
3113 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
3114 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3115 local_irq_restore(flags);
3116}
3117EXPORT_SYMBOL(__napi_schedule);
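/*
 * Illustrative sketch (editor's addition, not part of dev.c): the usual
 * interrupt-handler pattern around __napi_schedule(). napi_schedule_prep()
 * atomically takes NAPI_STATE_SCHED so only one context schedules the
 * instance; my_hw_disable_rx_irq() is a hypothetical helper.
 */
#if 0
static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct napi_struct *napi = dev_id;

	if (napi_schedule_prep(napi)) {
		my_hw_disable_rx_irq(napi->dev);	/* hypothetical */
		__napi_schedule(napi);
	}
	return IRQ_HANDLED;
}
#endif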
3118
Herbert Xud565b0a2008-12-15 23:38:52 -08003119void __napi_complete(struct napi_struct *n)
3120{
3121 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3122 BUG_ON(n->gro_list);
3123
3124 list_del(&n->poll_list);
3125 smp_mb__before_clear_bit();
3126 clear_bit(NAPI_STATE_SCHED, &n->state);
3127}
3128EXPORT_SYMBOL(__napi_complete);
3129
3130void napi_complete(struct napi_struct *n)
3131{
3132 unsigned long flags;
3133
3134 /*
3135 * don't let napi dequeue from the cpu poll list
3136	 * just in case it's running on a different cpu
3137 */
3138 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3139 return;
3140
3141 napi_gro_flush(n);
3142 local_irq_save(flags);
3143 __napi_complete(n);
3144 local_irq_restore(flags);
3145}
3146EXPORT_SYMBOL(napi_complete);
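/*
 * Illustrative sketch (editor's addition, not part of dev.c): the other
 * half of the NAPI contract. A poll routine calls napi_complete() only when
 * it used less than its budget, then re-enables the device interrupt.
 * This is a compact variant of the poll routine sketched earlier;
 * my_hw_process_rx() and my_hw_enable_rx_irq() are hypothetical.
 */
#if 0
static int my_poll(struct napi_struct *napi, int budget)
{
	int work = my_hw_process_rx(napi->dev, budget);	/* hypothetical */

	if (work < budget) {
		napi_complete(napi);
		my_hw_enable_rx_irq(napi->dev);	/* hypothetical */
	}
	return work;
}
#endif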
3147
3148void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3149 int (*poll)(struct napi_struct *, int), int weight)
3150{
3151 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00003152 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003153 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08003154 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08003155 napi->poll = poll;
3156 napi->weight = weight;
3157 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08003158 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08003159#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08003160 spin_lock_init(&napi->poll_lock);
3161 napi->poll_owner = -1;
3162#endif
3163 set_bit(NAPI_STATE_SCHED, &napi->state);
3164}
3165EXPORT_SYMBOL(netif_napi_add);
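/*
 * Illustrative sketch (editor's addition, not part of dev.c): a driver
 * registers its NAPI instance at probe time, before register_netdev().
 * A weight of 64 is the conventional value for Ethernet drivers; my_poll
 * is the hypothetical poll routine sketched above.
 */
#if 0
static struct napi_struct my_napi;

static int my_probe(struct net_device *netdev)
{
	netif_napi_add(netdev, &my_napi, my_poll, 64);
	return register_netdev(netdev);
}
#endif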
3166
3167void netif_napi_del(struct napi_struct *napi)
3168{
3169 struct sk_buff *skb, *next;
3170
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08003171 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07003172 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08003173
3174 for (skb = napi->gro_list; skb; skb = next) {
3175 next = skb->next;
3176 skb->next = NULL;
3177 kfree_skb(skb);
3178 }
3179
3180 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00003181 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003182}
3183EXPORT_SYMBOL(netif_napi_del);
3184
Tom Herbert1e94d722010-03-18 17:45:44 -07003185#ifdef CONFIG_SMP
Tom Herbert0a9627f2010-03-16 08:03:29 +00003186/*
3187 * net_rps_action sends any pending IPIs for RPS. This is only called from
3188 * softirq and interrupts must be enabled.
3189 */
3190static void net_rps_action(cpumask_t *mask)
3191{
3192 int cpu;
3193
3194	/* Send pending IPIs to kick RPS processing on remote cpus. */
3195 for_each_cpu_mask_nr(cpu, *mask) {
3196 struct softnet_data *queue = &per_cpu(softnet_data, cpu);
3197 if (cpu_online(cpu))
3198 __smp_call_function_single(cpu, &queue->csd, 0);
3199 }
3200 cpus_clear(*mask);
3201}
Tom Herbert1e94d722010-03-18 17:45:44 -07003202#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003203
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204static void net_rx_action(struct softirq_action *h)
3205{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003206 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08003207 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07003208 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07003209 void *have;
Tom Herbert1e94d722010-03-18 17:45:44 -07003210#ifdef CONFIG_SMP
Tom Herbert0a9627f2010-03-16 08:03:29 +00003211 int select;
3212 struct rps_remote_softirq_cpus *rcpus;
Tom Herbert1e94d722010-03-18 17:45:44 -07003213#endif
Matt Mackall53fb95d2005-08-11 19:27:43 -07003214
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215 local_irq_disable();
3216
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003217 while (!list_empty(list)) {
3218 struct napi_struct *n;
3219 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003221		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08003222		 * Allow this to run for 2 jiffies, which allows
3223 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003224 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08003225 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226 goto softnet_break;
3227
3228 local_irq_enable();
3229
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003230 /* Even though interrupts have been re-enabled, this
3231 * access is safe because interrupts can only add new
3232 * entries to the tail of this list, and only ->poll()
3233 * calls can remove this head entry from the list.
3234 */
stephen hemmingere5e26d72010-02-24 14:01:38 +00003235 n = list_first_entry(list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003237 have = netpoll_poll_lock(n);
3238
3239 weight = n->weight;
3240
David S. Miller0a7606c2007-10-29 21:28:47 -07003241 /* This NAPI_STATE_SCHED test is for avoiding a race
3242 * with netpoll's poll_napi(). Only the entity which
3243 * obtains the lock and sees NAPI_STATE_SCHED set will
3244 * actually make the ->poll() call. Therefore we avoid
3245		 * accidentally calling ->poll() when NAPI is not scheduled.
3246 */
3247 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00003248 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07003249 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00003250 trace_napi_poll(n);
3251 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003252
3253 WARN_ON_ONCE(work > weight);
3254
3255 budget -= work;
3256
3257 local_irq_disable();
3258
3259 /* Drivers must not modify the NAPI state if they
3260 * consume the entire weight. In such cases this code
3261 * still "owns" the NAPI instance and therefore can
3262 * move the instance around on the list at-will.
3263 */
David S. Millerfed17f32008-01-07 21:00:40 -08003264 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07003265 if (unlikely(napi_disable_pending(n))) {
3266 local_irq_enable();
3267 napi_complete(n);
3268 local_irq_disable();
3269 } else
David S. Millerfed17f32008-01-07 21:00:40 -08003270 list_move_tail(&n->poll_list, list);
3271 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003272
3273 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274 }
3275out:
Tom Herbert1e94d722010-03-18 17:45:44 -07003276#ifdef CONFIG_SMP
Tom Herbert0a9627f2010-03-16 08:03:29 +00003277 rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
3278 select = rcpus->select;
3279 rcpus->select ^= 1;
3280
Shannon Nelson515e06c2007-06-23 23:09:23 -07003281 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003282
Tom Herbert0a9627f2010-03-16 08:03:29 +00003283 net_rps_action(&rcpus->mask[select]);
Tom Herbert1e94d722010-03-18 17:45:44 -07003284#else
3285 local_irq_enable();
3286#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00003287
Chris Leechdb217332006-06-17 21:24:58 -07003288#ifdef CONFIG_NET_DMA
3289 /*
3290 * There may not be any more sk_buffs coming right now, so push
3291 * any pending DMA copies to hardware
3292 */
Dan Williams2ba05622009-01-06 11:38:14 -07003293 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07003294#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003295
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 return;
3297
3298softnet_break:
3299 __get_cpu_var(netdev_rx_stat).time_squeeze++;
3300 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3301 goto out;
3302}
3303
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003304static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003305
3306/**
3307 * register_gifconf - register a SIOCGIF handler
3308 * @family: Address family
3309 * @gifconf: Function handler
3310 *
3311 * Register protocol dependent address dumping routines. The handler
3312 * that is passed must not be freed or reused until it has been replaced
3313 * by another handler.
3314 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003315int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316{
3317 if (family >= NPROTO)
3318 return -EINVAL;
3319 gifconf_list[family] = gifconf;
3320 return 0;
3321}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003322EXPORT_SYMBOL(register_gifconf);
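/*
 * Illustrative sketch (editor's addition, not part of dev.c): address
 * families hook into SIOCGIFCONF roughly like this; IPv4, for instance,
 * registers its dumper from devinet.c. The prototype and init function
 * below are shown only for illustration.
 */
#if 0
static int inet_gifconf(struct net_device *dev, char __user *buf, int len);

static int __init my_af_init(void)
{
	return register_gifconf(PF_INET, inet_gifconf);
}
#endif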
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323
3324
3325/*
3326 * Map an interface index to its name (SIOCGIFNAME)
3327 */
3328
3329/*
3330 * We need this ioctl for efficient implementation of the
3331 * if_indextoname() function required by the IPv6 API. Without
3332 * it, we would have to search all the interfaces to find a
3333 * match. --pb
3334 */
3335
Eric W. Biederman881d9662007-09-17 11:56:21 -07003336static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337{
3338 struct net_device *dev;
3339 struct ifreq ifr;
3340
3341 /*
3342 * Fetch the caller's info block.
3343 */
3344
3345 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3346 return -EFAULT;
3347
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003348 rcu_read_lock();
3349 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003351 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352 return -ENODEV;
3353 }
3354
3355 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003356 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357
3358 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3359 return -EFAULT;
3360 return 0;
3361}
3362
3363/*
3364 * Perform a SIOCGIFCONF call. This structure will change
3365 * size eventually, and there is nothing I can do about it.
3366 * Thus we will need a 'compatibility mode'.
3367 */
3368
Eric W. Biederman881d9662007-09-17 11:56:21 -07003369static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370{
3371 struct ifconf ifc;
3372 struct net_device *dev;
3373 char __user *pos;
3374 int len;
3375 int total;
3376 int i;
3377
3378 /*
3379 * Fetch the caller's info block.
3380 */
3381
3382 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3383 return -EFAULT;
3384
3385 pos = ifc.ifc_buf;
3386 len = ifc.ifc_len;
3387
3388 /*
3389 * Loop over the interfaces, and write an info block for each.
3390 */
3391
3392 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003393 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 for (i = 0; i < NPROTO; i++) {
3395 if (gifconf_list[i]) {
3396 int done;
3397 if (!pos)
3398 done = gifconf_list[i](dev, NULL, 0);
3399 else
3400 done = gifconf_list[i](dev, pos + total,
3401 len - total);
3402 if (done < 0)
3403 return -EFAULT;
3404 total += done;
3405 }
3406 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003407 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408
3409 /*
3410 * All done. Write the updated control block back to the caller.
3411 */
3412 ifc.ifc_len = total;
3413
3414 /*
3415 * Both BSD and Solaris return 0 here, so we do too.
3416 */
3417 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3418}
3419
3420#ifdef CONFIG_PROC_FS
3421/*
3422 * This is invoked by the /proc filesystem handler to display a device
3423 * in detail.
3424 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003426 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427{
Denis V. Luneve372c412007-11-19 22:31:54 -08003428 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003429 loff_t off;
3430 struct net_device *dev;
3431
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003432 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07003433 if (!*pos)
3434 return SEQ_START_TOKEN;
3435
3436 off = 1;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003437 for_each_netdev_rcu(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003438 if (off++ == *pos)
3439 return dev;
3440
3441 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442}
3443
3444void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3445{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003446 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3447 first_net_device(seq_file_net(seq)) :
3448 next_net_device((struct net_device *)v);
3449
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 ++*pos;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003451 return rcu_dereference(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452}
3453
3454void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003455 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003457 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458}
3459
3460static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3461{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003462 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463
Jesper Dangaard Brouer2d13baf2010-01-05 05:50:52 +00003464 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
Rusty Russell5a1b5892007-04-28 21:04:03 -07003465 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3466 dev->name, stats->rx_bytes, stats->rx_packets,
3467 stats->rx_errors,
3468 stats->rx_dropped + stats->rx_missed_errors,
3469 stats->rx_fifo_errors,
3470 stats->rx_length_errors + stats->rx_over_errors +
3471 stats->rx_crc_errors + stats->rx_frame_errors,
3472 stats->rx_compressed, stats->multicast,
3473 stats->tx_bytes, stats->tx_packets,
3474 stats->tx_errors, stats->tx_dropped,
3475 stats->tx_fifo_errors, stats->collisions,
3476 stats->tx_carrier_errors +
3477 stats->tx_aborted_errors +
3478 stats->tx_window_errors +
3479 stats->tx_heartbeat_errors,
3480 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481}
3482
3483/*
3484 * Called from the PROCfs module. This now uses the new arbitrary sized
3485 * /proc/net interface to create /proc/net/dev
3486 */
3487static int dev_seq_show(struct seq_file *seq, void *v)
3488{
3489 if (v == SEQ_START_TOKEN)
3490 seq_puts(seq, "Inter-| Receive "
3491 " | Transmit\n"
3492 " face |bytes packets errs drop fifo frame "
3493 "compressed multicast|bytes packets errs "
3494 "drop fifo colls carrier compressed\n");
3495 else
3496 dev_seq_printf_stats(seq, v);
3497 return 0;
3498}
3499
3500static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3501{
3502 struct netif_rx_stats *rc = NULL;
3503
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003504 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003505 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506 rc = &per_cpu(netdev_rx_stat, *pos);
3507 break;
3508 } else
3509 ++*pos;
3510 return rc;
3511}
3512
3513static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3514{
3515 return softnet_get_online(pos);
3516}
3517
3518static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3519{
3520 ++*pos;
3521 return softnet_get_online(pos);
3522}
3523
3524static void softnet_seq_stop(struct seq_file *seq, void *v)
3525{
3526}
3527
3528static int softnet_seq_show(struct seq_file *seq, void *v)
3529{
3530 struct netif_rx_stats *s = v;
3531
Tom Herbert0a9627f2010-03-16 08:03:29 +00003532 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003533 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003534 0, 0, 0, 0, /* was fastroute */
Tom Herbert0a9627f2010-03-16 08:03:29 +00003535 s->cpu_collision, s->received_rps);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536 return 0;
3537}
3538
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003539static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540 .start = dev_seq_start,
3541 .next = dev_seq_next,
3542 .stop = dev_seq_stop,
3543 .show = dev_seq_show,
3544};
3545
3546static int dev_seq_open(struct inode *inode, struct file *file)
3547{
Denis V. Luneve372c412007-11-19 22:31:54 -08003548 return seq_open_net(inode, file, &dev_seq_ops,
3549 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003550}
3551
Arjan van de Ven9a321442007-02-12 00:55:35 -08003552static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003553 .owner = THIS_MODULE,
3554 .open = dev_seq_open,
3555 .read = seq_read,
3556 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003557 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558};
3559
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003560static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561 .start = softnet_seq_start,
3562 .next = softnet_seq_next,
3563 .stop = softnet_seq_stop,
3564 .show = softnet_seq_show,
3565};
3566
3567static int softnet_seq_open(struct inode *inode, struct file *file)
3568{
3569 return seq_open(file, &softnet_seq_ops);
3570}
3571
Arjan van de Ven9a321442007-02-12 00:55:35 -08003572static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573 .owner = THIS_MODULE,
3574 .open = softnet_seq_open,
3575 .read = seq_read,
3576 .llseek = seq_lseek,
3577 .release = seq_release,
3578};
3579
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003580static void *ptype_get_idx(loff_t pos)
3581{
3582 struct packet_type *pt = NULL;
3583 loff_t i = 0;
3584 int t;
3585
3586 list_for_each_entry_rcu(pt, &ptype_all, list) {
3587 if (i == pos)
3588 return pt;
3589 ++i;
3590 }
3591
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003592 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003593 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3594 if (i == pos)
3595 return pt;
3596 ++i;
3597 }
3598 }
3599 return NULL;
3600}
3601
3602static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003603 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003604{
3605 rcu_read_lock();
3606 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3607}
3608
3609static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3610{
3611 struct packet_type *pt;
3612 struct list_head *nxt;
3613 int hash;
3614
3615 ++*pos;
3616 if (v == SEQ_START_TOKEN)
3617 return ptype_get_idx(0);
3618
3619 pt = v;
3620 nxt = pt->list.next;
3621 if (pt->type == htons(ETH_P_ALL)) {
3622 if (nxt != &ptype_all)
3623 goto found;
3624 hash = 0;
3625 nxt = ptype_base[0].next;
3626 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003627 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003628
3629 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003630 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003631 return NULL;
3632 nxt = ptype_base[hash].next;
3633 }
3634found:
3635 return list_entry(nxt, struct packet_type, list);
3636}
3637
3638static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003639 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003640{
3641 rcu_read_unlock();
3642}
3643
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003644static int ptype_seq_show(struct seq_file *seq, void *v)
3645{
3646 struct packet_type *pt = v;
3647
3648 if (v == SEQ_START_TOKEN)
3649 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003650 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003651 if (pt->type == htons(ETH_P_ALL))
3652 seq_puts(seq, "ALL ");
3653 else
3654 seq_printf(seq, "%04x", ntohs(pt->type));
3655
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003656 seq_printf(seq, " %-8s %pF\n",
3657 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003658 }
3659
3660 return 0;
3661}
3662
3663static const struct seq_operations ptype_seq_ops = {
3664 .start = ptype_seq_start,
3665 .next = ptype_seq_next,
3666 .stop = ptype_seq_stop,
3667 .show = ptype_seq_show,
3668};
3669
3670static int ptype_seq_open(struct inode *inode, struct file *file)
3671{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003672 return seq_open_net(inode, file, &ptype_seq_ops,
3673 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003674}
3675
3676static const struct file_operations ptype_seq_fops = {
3677 .owner = THIS_MODULE,
3678 .open = ptype_seq_open,
3679 .read = seq_read,
3680 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003681 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003682};
3683
3684
Pavel Emelyanov46650792007-10-08 20:38:39 -07003685static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686{
3687 int rc = -ENOMEM;
3688
Eric W. Biederman881d9662007-09-17 11:56:21 -07003689 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003691 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003693 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003694 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003695
Eric W. Biederman881d9662007-09-17 11:56:21 -07003696 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003697 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003698 rc = 0;
3699out:
3700 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003701out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003702 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003704 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003706 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707 goto out;
3708}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003709
Pavel Emelyanov46650792007-10-08 20:38:39 -07003710static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003711{
3712 wext_proc_exit(net);
3713
3714 proc_net_remove(net, "ptype");
3715 proc_net_remove(net, "softnet_stat");
3716 proc_net_remove(net, "dev");
3717}
3718
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003719static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003720 .init = dev_proc_net_init,
3721 .exit = dev_proc_net_exit,
3722};
3723
3724static int __init dev_proc_init(void)
3725{
3726 return register_pernet_subsys(&dev_proc_ops);
3727}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003728#else
3729#define dev_proc_init() 0
3730#endif /* CONFIG_PROC_FS */
3731
3732
3733/**
3734 * netdev_set_master - set up master/slave pair
3735 * @slave: slave device
3736 * @master: new master device
3737 *
3738 * Changes the master device of the slave. Pass %NULL to break the
3739 * bonding. The caller must hold the RTNL semaphore. On a failure
3740 * a negative errno code is returned. On success the reference counts
3741 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3742 * function returns zero.
3743 */
3744int netdev_set_master(struct net_device *slave, struct net_device *master)
3745{
3746 struct net_device *old = slave->master;
3747
3748 ASSERT_RTNL();
3749
3750 if (master) {
3751 if (old)
3752 return -EBUSY;
3753 dev_hold(master);
3754 }
3755
3756 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003757
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758 synchronize_net();
3759
3760 if (old)
3761 dev_put(old);
3762
3763 if (master)
3764 slave->flags |= IFF_SLAVE;
3765 else
3766 slave->flags &= ~IFF_SLAVE;
3767
3768 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3769 return 0;
3770}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003771EXPORT_SYMBOL(netdev_set_master);
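/*
 * Illustrative sketch (editor's addition, not part of dev.c): a bonding-
 * style driver enslaving a device. netdev_set_master() asserts the RTNL,
 * so the caller must take it if not already held; bond_dev and slave_dev
 * are assumed valid here.
 */
#if 0
static int my_enslave(struct net_device *bond_dev,
		      struct net_device *slave_dev)
{
	int err;

	rtnl_lock();
	err = netdev_set_master(slave_dev, bond_dev);
	rtnl_unlock();
	return err;
}
#endif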
Linus Torvalds1da177e2005-04-16 15:20:36 -07003772
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003773static void dev_change_rx_flags(struct net_device *dev, int flags)
3774{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003775 const struct net_device_ops *ops = dev->netdev_ops;
3776
3777 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3778 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003779}
3780
Wang Chendad9b332008-06-18 01:48:28 -07003781static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003782{
3783 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003784 uid_t uid;
3785 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003786
Patrick McHardy24023452007-07-14 18:51:31 -07003787 ASSERT_RTNL();
3788
Wang Chendad9b332008-06-18 01:48:28 -07003789 dev->flags |= IFF_PROMISC;
3790 dev->promiscuity += inc;
3791 if (dev->promiscuity == 0) {
3792 /*
3793 * Avoid overflow.
3794 * If inc causes overflow, untouch promisc and return error.
3795 */
3796 if (inc < 0)
3797 dev->flags &= ~IFF_PROMISC;
3798 else {
3799 dev->promiscuity -= inc;
3800 printk(KERN_WARNING "%s: promiscuity touches roof, "
3801 "set promiscuity failed, promiscuity feature "
3802 "of device might be broken.\n", dev->name);
3803 return -EOVERFLOW;
3804 }
3805 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003806 if (dev->flags != old_flags) {
3807 printk(KERN_INFO "device %s %s promiscuous mode\n",
3808 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3809 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003810 if (audit_enabled) {
3811 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003812 audit_log(current->audit_context, GFP_ATOMIC,
3813 AUDIT_ANOM_PROMISCUOUS,
3814 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3815 dev->name, (dev->flags & IFF_PROMISC),
3816 (old_flags & IFF_PROMISC),
3817 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003818 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003819 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003820 }
Patrick McHardy24023452007-07-14 18:51:31 -07003821
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003822 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003823 }
Wang Chendad9b332008-06-18 01:48:28 -07003824 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003825}
3826
Linus Torvalds1da177e2005-04-16 15:20:36 -07003827/**
3828 * dev_set_promiscuity - update promiscuity count on a device
3829 * @dev: device
3830 * @inc: modifier
3831 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003832 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833 * remains above zero the interface remains promiscuous. Once it hits zero
3834 * the device reverts back to normal filtering operation. A negative inc
3835 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003836 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003837 */
Wang Chendad9b332008-06-18 01:48:28 -07003838int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839{
3840 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003841 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842
Wang Chendad9b332008-06-18 01:48:28 -07003843 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003844 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003845 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003846 if (dev->flags != old_flags)
3847 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003848 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003850EXPORT_SYMBOL(dev_set_promiscuity);
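/*
 * Illustrative sketch (editor's addition, not part of dev.c): the counted
 * usage pattern for dev_set_promiscuity(), e.g. from a capture component.
 * Every +1 must eventually be balanced by a -1 or the interface stays
 * promiscuous; the RTNL is required around both calls.
 */
#if 0
static int my_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);
	rtnl_unlock();
	return err;
}

static void my_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);
	rtnl_unlock();
}
#endif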
Linus Torvalds1da177e2005-04-16 15:20:36 -07003851
3852/**
3853 * dev_set_allmulti - update allmulti count on a device
3854 * @dev: device
3855 * @inc: modifier
3856 *
3857 * Add or remove reception of all multicast frames to a device. While the
3858 * count in the device remains above zero the interface remains listening
3859 * to all multicast frames. Once it hits zero the device reverts back to normal
3860 * filtering operation. A negative @inc value is used to drop the counter
3861 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003862 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003863 */
3864
Wang Chendad9b332008-06-18 01:48:28 -07003865int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003866{
3867 unsigned short old_flags = dev->flags;
3868
Patrick McHardy24023452007-07-14 18:51:31 -07003869 ASSERT_RTNL();
3870
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003872 dev->allmulti += inc;
3873 if (dev->allmulti == 0) {
3874 /*
3875 * Avoid overflow.
3876 * If inc causes overflow, untouch allmulti and return error.
3877 */
3878 if (inc < 0)
3879 dev->flags &= ~IFF_ALLMULTI;
3880 else {
3881 dev->allmulti -= inc;
3882 printk(KERN_WARNING "%s: allmulti touches roof, "
3883 "set allmulti failed, allmulti feature of "
3884 "device might be broken.\n", dev->name);
3885 return -EOVERFLOW;
3886 }
3887 }
Patrick McHardy24023452007-07-14 18:51:31 -07003888 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003889 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003890 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003891 }
Wang Chendad9b332008-06-18 01:48:28 -07003892 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003893}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003894EXPORT_SYMBOL(dev_set_allmulti);
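/*
 * Illustrative sketch (editor's addition, not part of dev.c): the same
 * counter idiom as dev_set_promiscuity(), taken while e.g. a multicast
 * routing service needs to see every multicast frame on the interface.
 */
#if 0
static int my_mcast_attach(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_allmulti(dev, 1);
	rtnl_unlock();
	return err;
}
#endif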
Patrick McHardy4417da62007-06-27 01:28:10 -07003895
3896/*
3897 * Upload unicast and multicast address lists to device and
3898 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003899 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003900 * are present.
3901 */
3902void __dev_set_rx_mode(struct net_device *dev)
3903{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003904 const struct net_device_ops *ops = dev->netdev_ops;
3905
Patrick McHardy4417da62007-06-27 01:28:10 -07003906 /* dev_open will call this function so the list will stay sane. */
3907 if (!(dev->flags&IFF_UP))
3908 return;
3909
3910 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003911 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003912
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003913 if (ops->ndo_set_rx_mode)
3914 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003915 else {
3916 /* Unicast addresses changes may only happen under the rtnl,
3917 * therefore calling __dev_set_promiscuity here is safe.
3918 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003919 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003920 __dev_set_promiscuity(dev, 1);
3921 dev->uc_promisc = 1;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003922 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003923 __dev_set_promiscuity(dev, -1);
3924 dev->uc_promisc = 0;
3925 }
3926
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003927 if (ops->ndo_set_multicast_list)
3928 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003929 }
3930}
3931
3932void dev_set_rx_mode(struct net_device *dev)
3933{
David S. Millerb9e40852008-07-15 00:15:08 -07003934 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003935 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003936 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937}
3938
Jiri Pirkof001fde2009-05-05 02:48:28 +00003939/* hw addresses list handling functions */
3940
Jiri Pirko31278e72009-06-17 01:12:19 +00003941static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3942 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003943{
3944 struct netdev_hw_addr *ha;
3945 int alloc_size;
3946
3947 if (addr_len > MAX_ADDR_LEN)
3948 return -EINVAL;
3949
Jiri Pirko31278e72009-06-17 01:12:19 +00003950 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003951 if (!memcmp(ha->addr, addr, addr_len) &&
3952 ha->type == addr_type) {
3953 ha->refcount++;
3954 return 0;
3955 }
3956 }
3957
3958
Jiri Pirkof001fde2009-05-05 02:48:28 +00003959 alloc_size = sizeof(*ha);
3960 if (alloc_size < L1_CACHE_BYTES)
3961 alloc_size = L1_CACHE_BYTES;
3962 ha = kmalloc(alloc_size, GFP_ATOMIC);
3963 if (!ha)
3964 return -ENOMEM;
3965 memcpy(ha->addr, addr, addr_len);
3966 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003967 ha->refcount = 1;
3968 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003969 list_add_tail_rcu(&ha->list, &list->list);
3970 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003971 return 0;
3972}
3973
3974static void ha_rcu_free(struct rcu_head *head)
3975{
3976 struct netdev_hw_addr *ha;
3977
3978 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3979 kfree(ha);
3980}
3981
Jiri Pirko31278e72009-06-17 01:12:19 +00003982static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3983 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003984{
3985 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003986
Jiri Pirko31278e72009-06-17 01:12:19 +00003987 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003988 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003989 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003990 if (--ha->refcount)
3991 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003992 list_del_rcu(&ha->list);
3993 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003994 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003995 return 0;
3996 }
3997 }
3998 return -ENOENT;
3999}
4000
Jiri Pirko31278e72009-06-17 01:12:19 +00004001static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
4002 struct netdev_hw_addr_list *from_list,
4003 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004004 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00004005{
4006 int err;
4007 struct netdev_hw_addr *ha, *ha2;
4008 unsigned char type;
4009
Jiri Pirko31278e72009-06-17 01:12:19 +00004010 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00004011 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00004012 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004013 if (err)
4014 goto unroll;
4015 }
4016 return 0;
4017
4018unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00004019 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00004020 if (ha2 == ha)
4021 break;
4022 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00004023 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004024 }
4025 return err;
4026}
4027
Jiri Pirko31278e72009-06-17 01:12:19 +00004028static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
4029 struct netdev_hw_addr_list *from_list,
4030 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004031 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00004032{
4033 struct netdev_hw_addr *ha;
4034 unsigned char type;
4035
Jiri Pirko31278e72009-06-17 01:12:19 +00004036 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00004037 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00004038		__hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004039 }
4040}
4041
Jiri Pirko31278e72009-06-17 01:12:19 +00004042static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4043 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004044 int addr_len)
4045{
4046 int err = 0;
4047 struct netdev_hw_addr *ha, *tmp;
4048
Jiri Pirko31278e72009-06-17 01:12:19 +00004049 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00004050 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00004051 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004052 addr_len, ha->type);
4053 if (err)
4054 break;
4055 ha->synced = true;
4056 ha->refcount++;
4057 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00004058 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
4059 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004060 }
4061 }
4062 return err;
4063}
4064
Jiri Pirko31278e72009-06-17 01:12:19 +00004065static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4066 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004067 int addr_len)
4068{
4069 struct netdev_hw_addr *ha, *tmp;
4070
Jiri Pirko31278e72009-06-17 01:12:19 +00004071 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00004072 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00004073 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004074 addr_len, ha->type);
4075 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00004076 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004077 addr_len, ha->type);
4078 }
4079 }
4080}
4081
Jiri Pirko31278e72009-06-17 01:12:19 +00004082static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00004083{
4084 struct netdev_hw_addr *ha, *tmp;
4085
Jiri Pirko31278e72009-06-17 01:12:19 +00004086 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00004087 list_del_rcu(&ha->list);
4088 call_rcu(&ha->rcu_head, ha_rcu_free);
4089 }
Jiri Pirko31278e72009-06-17 01:12:19 +00004090 list->count = 0;
4091}
4092
4093static void __hw_addr_init(struct netdev_hw_addr_list *list)
4094{
4095 INIT_LIST_HEAD(&list->list);
4096 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00004097}
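
/*
 * Usage sketch for the __hw_addr_* helpers above (illustrative only;
 * "ha_list" and "mac" are placeholders, not names from this file).
 * Adding an address that is already listed with the same type only
 * bumps its refcount:
 *
 *	struct netdev_hw_addr_list ha_list;
 *	unsigned char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *	__hw_addr_init(&ha_list);
 *	__hw_addr_add(&ha_list, mac, ETH_ALEN, NETDEV_HW_ADDR_T_LAN);
 *	__hw_addr_add(&ha_list, mac, ETH_ALEN, NETDEV_HW_ADDR_T_LAN);
 *
 * After the second add the entry's refcount is 2; the first
 * __hw_addr_del() only decrements it, and the second actually unlinks
 * the entry and frees it via call_rcu():
 *
 *	__hw_addr_del(&ha_list, mac, ETH_ALEN, NETDEV_HW_ADDR_T_LAN);
 *	__hw_addr_del(&ha_list, mac, ETH_ALEN, NETDEV_HW_ADDR_T_LAN);
 */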
4098
4099/* Device addresses handling functions */
4100
4101static void dev_addr_flush(struct net_device *dev)
4102{
4103 /* rtnl_mutex must be held here */
4104
Jiri Pirko31278e72009-06-17 01:12:19 +00004105 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004106 dev->dev_addr = NULL;
4107}
4108
4109static int dev_addr_init(struct net_device *dev)
4110{
4111 unsigned char addr[MAX_ADDR_LEN];
4112 struct netdev_hw_addr *ha;
4113 int err;
4114
4115 /* rtnl_mutex must be held here */
4116
Jiri Pirko31278e72009-06-17 01:12:19 +00004117 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00004118 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00004119 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00004120 NETDEV_HW_ADDR_T_LAN);
4121 if (!err) {
4122 /*
4123 * Get the first (previously created) address from the list
4124 * and set dev_addr pointer to this location.
4125 */
Jiri Pirko31278e72009-06-17 01:12:19 +00004126 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00004127 struct netdev_hw_addr, list);
4128 dev->dev_addr = ha->addr;
4129 }
4130 return err;
4131}
4132
4133/**
4134 * dev_addr_add - Add a device address
4135 * @dev: device
4136 * @addr: address to add
4137 * @addr_type: address type
4138 *
4139 * Add a device address to the device or increase the reference count if
4140 * it already exists.
4141 *
4142 * The caller must hold the rtnl_mutex.
4143 */
4144int dev_addr_add(struct net_device *dev, unsigned char *addr,
4145 unsigned char addr_type)
4146{
4147 int err;
4148
4149 ASSERT_RTNL();
4150
Jiri Pirko31278e72009-06-17 01:12:19 +00004151 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004152 if (!err)
4153 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4154 return err;
4155}
4156EXPORT_SYMBOL(dev_addr_add);
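
/*
 * Example (sketch; "dev" and "san_addr" are placeholders): a driver
 * exposing an additional hardware address could publish it like this,
 * with rtnl_lock() already held:
 *
 *	err = dev_addr_add(dev, san_addr, NETDEV_HW_ADDR_T_SAN);
 *
 * Repeating the call for the same address/type only increases the
 * reference count, so every add needs a matching dev_addr_del().
 */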
4157
4158/**
4159 * dev_addr_del - Release a device address.
4160 * @dev: device
4161 * @addr: address to delete
4162 * @addr_type: address type
4163 *
4164 * Release reference to a device address and remove it from the device
4165 * if the reference count drops to zero.
4166 *
4167 * The caller must hold the rtnl_mutex.
4168 */
4169int dev_addr_del(struct net_device *dev, unsigned char *addr,
4170 unsigned char addr_type)
4171{
4172 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00004173 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00004174
4175 ASSERT_RTNL();
4176
Jiri Pirkoccffad252009-05-22 23:22:17 +00004177 /*
4178	 * We cannot remove the first address from the list because
4179	 * dev->dev_addr points to it.
4180 */
Jiri Pirko31278e72009-06-17 01:12:19 +00004181 ha = list_first_entry(&dev->dev_addrs.list,
4182 struct netdev_hw_addr, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004183 if (ha->addr == dev->dev_addr && ha->refcount == 1)
4184 return -ENOENT;
4185
Jiri Pirko31278e72009-06-17 01:12:19 +00004186 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004187 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004188 if (!err)
4189 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4190 return err;
4191}
4192EXPORT_SYMBOL(dev_addr_del);
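
/*
 * Example (sketch; "dev" and "san_addr" as in the sketch above):
 * releasing the address again, still under rtnl_lock(). Note that the
 * function refuses to drop the first list entry while dev->dev_addr
 * points at it and it holds the only reference:
 *
 *	err = dev_addr_del(dev, san_addr, NETDEV_HW_ADDR_T_SAN);
 */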
4193
4194/**
4195 * dev_addr_add_multiple - Add device addresses from another device
4196 * @to_dev: device to which addresses will be added
4197 * @from_dev: device from which addresses will be added
4198 * @addr_type: address type - 0 means the type will be taken from from_dev
4199 *
4200 * Add the device addresses of one device to another.
4201 *
4202 * The caller must hold the rtnl_mutex.
4203 */
4204int dev_addr_add_multiple(struct net_device *to_dev,
4205 struct net_device *from_dev,
4206 unsigned char addr_type)
4207{
4208 int err;
4209
4210 ASSERT_RTNL();
4211
4212 if (from_dev->addr_len != to_dev->addr_len)
4213 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00004214 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004215 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004216 if (!err)
4217 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4218 return err;
4219}
4220EXPORT_SYMBOL(dev_addr_add_multiple);
4221
4222/**
4223 * dev_addr_del_multiple - Delete device addresses by another device
4224 * @to_dev: device where the addresses will be deleted
4225 * @from_dev: device supplying the addresses to be deleted
4226 * @addr_type: address type - 0 means the type will be taken from from_dev
4227 *
4228 * Deletes the addresses in to_dev that appear in from_dev's list.
4229 *
4230 * The caller must hold the rtnl_mutex.
4231 */
4232int dev_addr_del_multiple(struct net_device *to_dev,
4233 struct net_device *from_dev,
4234 unsigned char addr_type)
4235{
4236 ASSERT_RTNL();
4237
4238 if (from_dev->addr_len != to_dev->addr_len)
4239 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00004240 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004241 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004242 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4243 return 0;
4244}
4245EXPORT_SYMBOL(dev_addr_del_multiple);
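
/*
 * Example (sketch; "bond" and "slave" are hypothetical devices with
 * equal addr_len): a stacking driver can mirror a lower device's
 * address list on enslave and drop it again on release, both under
 * rtnl_lock(). Passing 0 keeps each entry's original type:
 *
 *	err = dev_addr_add_multiple(bond, slave, 0);
 *	...
 *	err = dev_addr_del_multiple(bond, slave, 0);
 */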
4246
Jiri Pirko31278e72009-06-17 01:12:19 +00004247/* multicast addresses handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00004248
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004249int __dev_addr_delete(struct dev_addr_list **list, int *count,
4250 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07004251{
4252 struct dev_addr_list *da;
4253
4254 for (; (da = *list) != NULL; list = &da->next) {
4255 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4256 alen == da->da_addrlen) {
4257 if (glbl) {
4258 int old_glbl = da->da_gusers;
4259 da->da_gusers = 0;
4260 if (old_glbl == 0)
4261 break;
4262 }
4263 if (--da->da_users)
4264 return 0;
4265
4266 *list = da->next;
4267 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004268 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07004269 return 0;
4270 }
4271 }
4272 return -ENOENT;
4273}
4274
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004275int __dev_addr_add(struct dev_addr_list **list, int *count,
4276 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07004277{
4278 struct dev_addr_list *da;
4279
4280 for (da = *list; da != NULL; da = da->next) {
4281 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4282 da->da_addrlen == alen) {
4283 if (glbl) {
4284 int old_glbl = da->da_gusers;
4285 da->da_gusers = 1;
4286 if (old_glbl)
4287 return 0;
4288 }
4289 da->da_users++;
4290 return 0;
4291 }
4292 }
4293
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08004294 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07004295 if (da == NULL)
4296 return -ENOMEM;
4297 memcpy(da->da_addr, addr, alen);
4298 da->da_addrlen = alen;
4299 da->da_users = 1;
4300 da->da_gusers = glbl ? 1 : 0;
4301 da->next = *list;
4302 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004303 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07004304 return 0;
4305}
4306
Patrick McHardy4417da62007-06-27 01:28:10 -07004307/**
4308 * dev_unicast_delete - Release secondary unicast address.
4309 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004310 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07004311 *
4312 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004313 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07004314 *
4315 * The caller must hold the rtnl_mutex.
4316 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00004317int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07004318{
4319 int err;
4320
4321 ASSERT_RTNL();
4322
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004323 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004324 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
4325 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004326 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004327 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004328 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004329 return err;
4330}
4331EXPORT_SYMBOL(dev_unicast_delete);
4332
4333/**
4334 * dev_unicast_add - add a secondary unicast address
4335 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07004336 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07004337 *
4338 * Add a secondary unicast address to the device or increase
4339 * the reference count if it already exists.
4340 *
4341 * The caller must hold the rtnl_mutex.
4342 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00004343int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07004344{
4345 int err;
4346
4347 ASSERT_RTNL();
4348
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004349 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004350 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4351 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004352 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004353 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004354 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004355 return err;
4356}
4357EXPORT_SYMBOL(dev_unicast_add);
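
/*
 * Example (sketch; "dev" and "extra_mac" are placeholders): a driver
 * that wants to receive frames for a second MAC address can add it as
 * a secondary unicast address and drop it when done, under rtnl_lock():
 *
 *	err = dev_unicast_add(dev, extra_mac);
 *	...
 *	err = dev_unicast_delete(dev, extra_mac);
 *
 * Both calls re-program the RX filter via __dev_set_rx_mode() on
 * success.
 */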
4358
Chris Leeche83a2ea2008-01-31 16:53:23 -08004359int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4360 struct dev_addr_list **from, int *from_count)
4361{
4362 struct dev_addr_list *da, *next;
4363 int err = 0;
4364
4365 da = *from;
4366 while (da != NULL) {
4367 next = da->next;
4368 if (!da->da_synced) {
4369 err = __dev_addr_add(to, to_count,
4370 da->da_addr, da->da_addrlen, 0);
4371 if (err < 0)
4372 break;
4373 da->da_synced = 1;
4374 da->da_users++;
4375 } else if (da->da_users == 1) {
4376 __dev_addr_delete(to, to_count,
4377 da->da_addr, da->da_addrlen, 0);
4378 __dev_addr_delete(from, from_count,
4379 da->da_addr, da->da_addrlen, 0);
4380 }
4381 da = next;
4382 }
4383 return err;
4384}
Johannes Bergc4029082009-06-17 17:43:30 +02004385EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004386
4387void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4388 struct dev_addr_list **from, int *from_count)
4389{
4390 struct dev_addr_list *da, *next;
4391
4392 da = *from;
4393 while (da != NULL) {
4394 next = da->next;
4395 if (da->da_synced) {
4396 __dev_addr_delete(to, to_count,
4397 da->da_addr, da->da_addrlen, 0);
4398 da->da_synced = 0;
4399 __dev_addr_delete(from, from_count,
4400 da->da_addr, da->da_addrlen, 0);
4401 }
4402 da = next;
4403 }
4404}
Johannes Bergc4029082009-06-17 17:43:30 +02004405EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004406
4407/**
4408 * dev_unicast_sync - Synchronize device's unicast list to another device
4409 * @to: destination device
4410 * @from: source device
4411 *
4412 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004413 * addresses that have no users left. The source device must be
4414 * locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08004415 *
4416 * This function is intended to be called from the dev->set_rx_mode
4417 * function of layered software devices.
4418 */
4419int dev_unicast_sync(struct net_device *to, struct net_device *from)
4420{
4421 int err = 0;
4422
Jiri Pirkoccffad252009-05-22 23:22:17 +00004423 if (to->addr_len != from->addr_len)
4424 return -EINVAL;
4425
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004426 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004427 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004428 if (!err)
4429 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004430 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004431 return err;
4432}
4433EXPORT_SYMBOL(dev_unicast_sync);
4434
4435/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004436 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004437 * @to: destination device
4438 * @from: source device
4439 *
4440 * Remove all addresses that were added to the destination device by
4441 * dev_unicast_sync(). This function is intended to be called from the
4442 * dev->stop function of layered software devices.
4443 */
4444void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4445{
Jiri Pirkoccffad252009-05-22 23:22:17 +00004446 if (to->addr_len != from->addr_len)
4447 return;
4448
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004449 netif_addr_lock_bh(from);
4450 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004451 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004452 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004453 netif_addr_unlock(to);
4454 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004455}
4456EXPORT_SYMBOL(dev_unicast_unsync);
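
/*
 * Example (sketch; "upper_dev" and "real_dev" are placeholders for a
 * layered device and the lower device it runs on, with real_dev
 * typically taken from the upper device's private data): the upper
 * driver propagates its secondary unicast addresses from its
 * set_rx_mode handler and removes them again when it is stopped:
 *
 *	static void my_set_rx_mode(struct net_device *upper_dev)
 *	{
 *		dev_unicast_sync(real_dev, upper_dev);
 *	}
 *
 *	static int my_stop(struct net_device *upper_dev)
 *	{
 *		dev_unicast_unsync(real_dev, upper_dev);
 *		return 0;
 *	}
 */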
4457
Jiri Pirkoccffad252009-05-22 23:22:17 +00004458static void dev_unicast_flush(struct net_device *dev)
4459{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004460 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004461 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004462 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004463}
4464
4465static void dev_unicast_init(struct net_device *dev)
4466{
Jiri Pirko31278e72009-06-17 01:12:19 +00004467 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004468}
4469
4470
Denis Cheng12972622007-07-18 02:12:56 -07004471static void __dev_addr_discard(struct dev_addr_list **list)
4472{
4473 struct dev_addr_list *tmp;
4474
4475 while (*list != NULL) {
4476 tmp = *list;
4477 *list = tmp->next;
4478 if (tmp->da_users > tmp->da_gusers)
4479 printk("__dev_addr_discard: address leakage! "
4480 "da_users=%d\n", tmp->da_users);
4481 kfree(tmp);
4482 }
4483}
4484
Denis Cheng26cc2522007-07-18 02:12:03 -07004485static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004486{
David S. Millerb9e40852008-07-15 00:15:08 -07004487 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004488
Denis Cheng456ad752007-07-18 02:10:54 -07004489 __dev_addr_discard(&dev->mc_list);
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004490 netdev_mc_count(dev) = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004491
David S. Millerb9e40852008-07-15 00:15:08 -07004492 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004493}
4494
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004495/**
4496 * dev_get_flags - get flags reported to userspace
4497 * @dev: device
4498 *
4499 * Get the combination of flag bits exported through APIs to userspace.
4500 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501unsigned dev_get_flags(const struct net_device *dev)
4502{
4503 unsigned flags;
4504
4505 flags = (dev->flags & ~(IFF_PROMISC |
4506 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004507 IFF_RUNNING |
4508 IFF_LOWER_UP |
4509 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510 (dev->gflags & (IFF_PROMISC |
4511 IFF_ALLMULTI));
4512
Stefan Rompfb00055a2006-03-20 17:09:11 -08004513 if (netif_running(dev)) {
4514 if (netif_oper_up(dev))
4515 flags |= IFF_RUNNING;
4516 if (netif_carrier_ok(dev))
4517 flags |= IFF_LOWER_UP;
4518 if (netif_dormant(dev))
4519 flags |= IFF_DORMANT;
4520 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004521
4522 return flags;
4523}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004524EXPORT_SYMBOL(dev_get_flags);
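
/*
 * Example (sketch; "dev" is a placeholder): telling "administratively
 * up" apart from "operationally up" with the flag word built above:
 *
 *	unsigned flags = dev_get_flags(dev);
 *
 *	if ((flags & IFF_UP) && !(flags & IFF_RUNNING))
 *		pr_debug("%s: up, but no carrier\n", dev->name);
 */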
Linus Torvalds1da177e2005-04-16 15:20:36 -07004525
Patrick McHardybd380812010-02-26 06:34:53 +00004526int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004527{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528 int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004529 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004530
Patrick McHardy24023452007-07-14 18:51:31 -07004531 ASSERT_RTNL();
4532
Linus Torvalds1da177e2005-04-16 15:20:36 -07004533 /*
4534 * Set the flags on our device.
4535 */
4536
4537 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4538 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4539 IFF_AUTOMEDIA)) |
4540 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4541 IFF_ALLMULTI));
4542
4543 /*
4544 * Load in the correct multicast list now the flags have changed.
4545 */
4546
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004547 if ((old_flags ^ flags) & IFF_MULTICAST)
4548 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004549
Patrick McHardy4417da62007-06-27 01:28:10 -07004550 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004551
4552 /*
4553	 *	Have we downed the interface? We handle IFF_UP ourselves
4554 * according to user attempts to set it, rather than blindly
4555 * setting it.
4556 */
4557
4558 ret = 0;
4559 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00004560 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004561
4562 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004563 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564 }
4565
Linus Torvalds1da177e2005-04-16 15:20:36 -07004566 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004567 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4568
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569 dev->gflags ^= IFF_PROMISC;
4570 dev_set_promiscuity(dev, inc);
4571 }
4572
4573 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4574	   is important. Some (broken) drivers set IFF_PROMISC when
4575	   IFF_ALLMULTI is requested, without asking us and without reporting it.
4576 */
4577 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004578 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4579
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580 dev->gflags ^= IFF_ALLMULTI;
4581 dev_set_allmulti(dev, inc);
4582 }
4583
Patrick McHardybd380812010-02-26 06:34:53 +00004584 return ret;
4585}
4586
4587void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4588{
4589 unsigned int changes = dev->flags ^ old_flags;
4590
4591 if (changes & IFF_UP) {
4592 if (dev->flags & IFF_UP)
4593 call_netdevice_notifiers(NETDEV_UP, dev);
4594 else
4595 call_netdevice_notifiers(NETDEV_DOWN, dev);
4596 }
4597
4598 if (dev->flags & IFF_UP &&
4599 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4600 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4601}
4602
4603/**
4604 * dev_change_flags - change device settings
4605 * @dev: device
4606 * @flags: device state flags
4607 *
4608 *	Change settings on a device based on the given state flags. The flags are
4609 * in the userspace exported format.
4610 */
4611int dev_change_flags(struct net_device *dev, unsigned flags)
4612{
4613 int ret, changes;
4614 int old_flags = dev->flags;
4615
4616 ret = __dev_change_flags(dev, flags);
4617 if (ret < 0)
4618 return ret;
4619
4620 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07004621 if (changes)
4622 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623
Patrick McHardybd380812010-02-26 06:34:53 +00004624 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004625 return ret;
4626}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004627EXPORT_SYMBOL(dev_change_flags);
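
/*
 * Example (sketch; "dev" is a placeholder): enabling promiscuous mode
 * through the userspace-visible flag interface, under rtnl_lock().
 * The IFF_PROMISC bit is tracked in dev->gflags and converted into a
 * dev_set_promiscuity() reference by __dev_change_flags():
 *
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_PROMISC);
 */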
Linus Torvalds1da177e2005-04-16 15:20:36 -07004628
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004629/**
4630 * dev_set_mtu - Change maximum transfer unit
4631 * @dev: device
4632 * @new_mtu: new transfer unit
4633 *
4634 * Change the maximum transfer size of the network device.
4635 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636int dev_set_mtu(struct net_device *dev, int new_mtu)
4637{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004638 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004639 int err;
4640
4641 if (new_mtu == dev->mtu)
4642 return 0;
4643
4644	/* MTU must not be negative. */
4645 if (new_mtu < 0)
4646 return -EINVAL;
4647
4648 if (!netif_device_present(dev))
4649 return -ENODEV;
4650
4651 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004652 if (ops->ndo_change_mtu)
4653 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654 else
4655 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004656
Linus Torvalds1da177e2005-04-16 15:20:36 -07004657 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004658 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004659 return err;
4660}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004661EXPORT_SYMBOL(dev_set_mtu);
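
/*
 * Example (sketch; "dev" is a placeholder): requesting a jumbo-frame
 * MTU. The driver's ndo_change_mtu may still reject the value; on
 * success a NETDEV_CHANGEMTU notification is sent if the device is up:
 *
 *	err = dev_set_mtu(dev, 9000);
 */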
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004663/**
4664 * dev_set_mac_address - Change Media Access Control Address
4665 * @dev: device
4666 * @sa: new address
4667 *
4668 * Change the hardware (MAC) address of the device
4669 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4671{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004672 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004673 int err;
4674
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004675 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676 return -EOPNOTSUPP;
4677 if (sa->sa_family != dev->type)
4678 return -EINVAL;
4679 if (!netif_device_present(dev))
4680 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004681 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004683 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004684 return err;
4685}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004686EXPORT_SYMBOL(dev_set_mac_address);
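
/*
 * Example (sketch; "dev" and "new_mac" are placeholders): setting a
 * new MAC address on an Ethernet-like device. sa_family must match
 * dev->type or the call fails with -EINVAL:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */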
Linus Torvalds1da177e2005-04-16 15:20:36 -07004687
4688/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00004689 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004691static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004692{
4693 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00004694 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004695
4696 if (!dev)
4697 return -ENODEV;
4698
4699 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004700 case SIOCGIFFLAGS: /* Get interface flags */
4701 ifr->ifr_flags = (short) dev_get_flags(dev);
4702 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004703
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004704 case SIOCGIFMETRIC: /* Get the metric on the interface
4705 (currently unused) */
4706 ifr->ifr_metric = 0;
4707 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004709 case SIOCGIFMTU: /* Get the MTU of a device */
4710 ifr->ifr_mtu = dev->mtu;
4711 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004713 case SIOCGIFHWADDR:
4714 if (!dev->addr_len)
4715 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4716 else
4717 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4718 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4719 ifr->ifr_hwaddr.sa_family = dev->type;
4720 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004721
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004722 case SIOCGIFSLAVE:
4723 err = -EINVAL;
4724 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004725
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004726 case SIOCGIFMAP:
4727 ifr->ifr_map.mem_start = dev->mem_start;
4728 ifr->ifr_map.mem_end = dev->mem_end;
4729 ifr->ifr_map.base_addr = dev->base_addr;
4730 ifr->ifr_map.irq = dev->irq;
4731 ifr->ifr_map.dma = dev->dma;
4732 ifr->ifr_map.port = dev->if_port;
4733 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004734
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004735 case SIOCGIFINDEX:
4736 ifr->ifr_ifindex = dev->ifindex;
4737 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004738
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004739 case SIOCGIFTXQLEN:
4740 ifr->ifr_qlen = dev->tx_queue_len;
4741 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004742
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004743 default:
4744 /* dev_ioctl() should ensure this case
4745 * is never reached
4746 */
4747 WARN_ON(1);
4748 err = -EINVAL;
4749 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004750
4751 }
4752 return err;
4753}
4754
4755/*
4756 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4757 */
4758static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4759{
4760 int err;
4761 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004762 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004763
4764 if (!dev)
4765 return -ENODEV;
4766
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004767 ops = dev->netdev_ops;
4768
Jeff Garzik14e3e072007-10-08 00:06:32 -07004769 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004770 case SIOCSIFFLAGS: /* Set interface flags */
4771 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004772
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004773 case SIOCSIFMETRIC: /* Set the metric on the interface
4774 (currently unused) */
4775 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004776
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004777 case SIOCSIFMTU: /* Set the MTU of a device */
4778 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004779
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004780 case SIOCSIFHWADDR:
4781 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004782
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004783 case SIOCSIFHWBROADCAST:
4784 if (ifr->ifr_hwaddr.sa_family != dev->type)
4785 return -EINVAL;
4786 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4787 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4788 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4789 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004790
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004791 case SIOCSIFMAP:
4792 if (ops->ndo_set_config) {
4793 if (!netif_device_present(dev))
4794 return -ENODEV;
4795 return ops->ndo_set_config(dev, &ifr->ifr_map);
4796 }
4797 return -EOPNOTSUPP;
4798
4799 case SIOCADDMULTI:
4800 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4801 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4802 return -EINVAL;
4803 if (!netif_device_present(dev))
4804 return -ENODEV;
4805 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4806 dev->addr_len, 1);
4807
4808 case SIOCDELMULTI:
4809 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4810 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4811 return -EINVAL;
4812 if (!netif_device_present(dev))
4813 return -ENODEV;
4814 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4815 dev->addr_len, 1);
4816
4817 case SIOCSIFTXQLEN:
4818 if (ifr->ifr_qlen < 0)
4819 return -EINVAL;
4820 dev->tx_queue_len = ifr->ifr_qlen;
4821 return 0;
4822
4823 case SIOCSIFNAME:
4824 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4825 return dev_change_name(dev, ifr->ifr_newname);
4826
4827 /*
4828 * Unknown or private ioctl
4829 */
4830 default:
4831 if ((cmd >= SIOCDEVPRIVATE &&
4832 cmd <= SIOCDEVPRIVATE + 15) ||
4833 cmd == SIOCBONDENSLAVE ||
4834 cmd == SIOCBONDRELEASE ||
4835 cmd == SIOCBONDSETHWADDR ||
4836 cmd == SIOCBONDSLAVEINFOQUERY ||
4837 cmd == SIOCBONDINFOQUERY ||
4838 cmd == SIOCBONDCHANGEACTIVE ||
4839 cmd == SIOCGMIIPHY ||
4840 cmd == SIOCGMIIREG ||
4841 cmd == SIOCSMIIREG ||
4842 cmd == SIOCBRADDIF ||
4843 cmd == SIOCBRDELIF ||
4844 cmd == SIOCSHWTSTAMP ||
4845 cmd == SIOCWANDEV) {
4846 err = -EOPNOTSUPP;
4847 if (ops->ndo_do_ioctl) {
4848 if (netif_device_present(dev))
4849 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4850 else
4851 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004852 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004853 } else
4854 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004855
4856 }
4857 return err;
4858}
4859
4860/*
4861 * This function handles all "interface"-type I/O control requests. The actual
4862 * 'doing' part of this is dev_ifsioc above.
4863 */
4864
4865/**
4866 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004867 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004868 * @cmd: command to issue
4869 * @arg: pointer to a struct ifreq in user space
4870 *
4871 * Issue ioctl functions to devices. This is normally called by the
4872 * user space syscall interfaces but can sometimes be useful for
4873 * other purposes. The return value is the return from the syscall if
4874 * positive or a negative errno code on error.
4875 */
4876
Eric W. Biederman881d9662007-09-17 11:56:21 -07004877int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878{
4879 struct ifreq ifr;
4880 int ret;
4881 char *colon;
4882
4883	/* One special case: SIOCGIFCONF takes an ifconf argument
4884	   and requires a shared lock, because it sleeps while writing
4885	   to user space.
4886 */
4887
4888 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004889 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004890 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004891 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004892 return ret;
4893 }
4894 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004895 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004896
4897 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4898 return -EFAULT;
4899
4900 ifr.ifr_name[IFNAMSIZ-1] = 0;
4901
4902 colon = strchr(ifr.ifr_name, ':');
4903 if (colon)
4904 *colon = 0;
4905
4906 /*
4907 * See which interface the caller is talking about.
4908 */
4909
4910 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004911 /*
4912 * These ioctl calls:
4913 * - can be done by all.
4914 * - atomic and do not require locking.
4915 * - return a value
4916 */
4917 case SIOCGIFFLAGS:
4918 case SIOCGIFMETRIC:
4919 case SIOCGIFMTU:
4920 case SIOCGIFHWADDR:
4921 case SIOCGIFSLAVE:
4922 case SIOCGIFMAP:
4923 case SIOCGIFINDEX:
4924 case SIOCGIFTXQLEN:
4925 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004926 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004927 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004928 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004929 if (!ret) {
4930 if (colon)
4931 *colon = ':';
4932 if (copy_to_user(arg, &ifr,
4933 sizeof(struct ifreq)))
4934 ret = -EFAULT;
4935 }
4936 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004937
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004938 case SIOCETHTOOL:
4939 dev_load(net, ifr.ifr_name);
4940 rtnl_lock();
4941 ret = dev_ethtool(net, &ifr);
4942 rtnl_unlock();
4943 if (!ret) {
4944 if (colon)
4945 *colon = ':';
4946 if (copy_to_user(arg, &ifr,
4947 sizeof(struct ifreq)))
4948 ret = -EFAULT;
4949 }
4950 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004951
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004952 /*
4953 * These ioctl calls:
4954 * - require superuser power.
4955 * - require strict serialization.
4956 * - return a value
4957 */
4958 case SIOCGMIIPHY:
4959 case SIOCGMIIREG:
4960 case SIOCSIFNAME:
4961 if (!capable(CAP_NET_ADMIN))
4962 return -EPERM;
4963 dev_load(net, ifr.ifr_name);
4964 rtnl_lock();
4965 ret = dev_ifsioc(net, &ifr, cmd);
4966 rtnl_unlock();
4967 if (!ret) {
4968 if (colon)
4969 *colon = ':';
4970 if (copy_to_user(arg, &ifr,
4971 sizeof(struct ifreq)))
4972 ret = -EFAULT;
4973 }
4974 return ret;
4975
4976 /*
4977 * These ioctl calls:
4978 * - require superuser power.
4979 * - require strict serialization.
4980 * - do not return a value
4981 */
4982 case SIOCSIFFLAGS:
4983 case SIOCSIFMETRIC:
4984 case SIOCSIFMTU:
4985 case SIOCSIFMAP:
4986 case SIOCSIFHWADDR:
4987 case SIOCSIFSLAVE:
4988 case SIOCADDMULTI:
4989 case SIOCDELMULTI:
4990 case SIOCSIFHWBROADCAST:
4991 case SIOCSIFTXQLEN:
4992 case SIOCSMIIREG:
4993 case SIOCBONDENSLAVE:
4994 case SIOCBONDRELEASE:
4995 case SIOCBONDSETHWADDR:
4996 case SIOCBONDCHANGEACTIVE:
4997 case SIOCBRADDIF:
4998 case SIOCBRDELIF:
4999 case SIOCSHWTSTAMP:
5000 if (!capable(CAP_NET_ADMIN))
5001 return -EPERM;
5002 /* fall through */
5003 case SIOCBONDSLAVEINFOQUERY:
5004 case SIOCBONDINFOQUERY:
5005 dev_load(net, ifr.ifr_name);
5006 rtnl_lock();
5007 ret = dev_ifsioc(net, &ifr, cmd);
5008 rtnl_unlock();
5009 return ret;
5010
5011 case SIOCGIFMEM:
5012 /* Get the per device memory space. We can add this but
5013 * currently do not support it */
5014 case SIOCSIFMEM:
5015 /* Set the per device memory buffer space.
5016 * Not applicable in our case */
5017 case SIOCSIFLINK:
5018 return -EINVAL;
5019
5020 /*
5021 * Unknown or private ioctl.
5022 */
5023 default:
5024 if (cmd == SIOCWANDEV ||
5025 (cmd >= SIOCDEVPRIVATE &&
5026 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005027 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07005029 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005030 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005031 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005033 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005034 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005035 }
5036 /* Take care of Wireless Extensions */
5037 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5038 return wext_handle_ioctl(net, &ifr, cmd, arg);
5039 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005040 }
5041}
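
/*
 * Userspace view of the path above (sketch, not kernel code): the
 * SIOCGIFMTU branch of dev_ifsioc_locked() is what answers this query:
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu %d\n", ifr.ifr_mtu);
 */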
5042
5043
5044/**
5045 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07005046 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047 *
5048 * Returns a suitable unique value for a new device interface
5049 * number. The caller must hold the rtnl semaphore or the
5050 * dev_base_lock to be sure it remains unique.
5051 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07005052static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005053{
5054 static int ifindex;
5055 for (;;) {
5056 if (++ifindex <= 0)
5057 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005058 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005059 return ifindex;
5060 }
5061}
5062
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08005064static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005065
Stephen Hemminger6f05f622007-03-08 20:46:03 -08005066static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069}
5070
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005071static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005072{
Krishna Kumare93737b2009-12-08 22:26:02 +00005073 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005074
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005075 BUG_ON(dev_boot_phase);
5076 ASSERT_RTNL();
5077
Krishna Kumare93737b2009-12-08 22:26:02 +00005078 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005079		/* Some devices call us without having registered, in order
Krishna Kumare93737b2009-12-08 22:26:02 +00005080		 * to unwind a failed initialization. Remove those
5081		 * devices and proceed with the remaining ones.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005082 */
5083 if (dev->reg_state == NETREG_UNINITIALIZED) {
5084 pr_debug("unregister_netdevice: device %s/%p never "
5085 "was registered\n", dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005086
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005087 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00005088 list_del(&dev->unreg_list);
5089 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005090 }
5091
5092 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5093
5094 /* If device is running, close it first. */
5095 dev_close(dev);
5096
5097 /* And unlink it from device chain. */
5098 unlist_netdevice(dev);
5099
5100 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005101 }
5102
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005103 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005104
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005105 list_for_each_entry(dev, head, unreg_list) {
5106 /* Shutdown queueing discipline. */
5107 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005108
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005109
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005110		/* Notify protocols that we are about to destroy
5111		   this device. They should clean up all of their state.
5112 */
5113 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5114
Patrick McHardya2835762010-02-26 06:34:51 +00005115 if (!dev->rtnl_link_ops ||
5116 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5117 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5118
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005119 /*
5120 * Flush the unicast and multicast chains
5121 */
5122 dev_unicast_flush(dev);
5123 dev_addr_discard(dev);
5124
5125 if (dev->netdev_ops->ndo_uninit)
5126 dev->netdev_ops->ndo_uninit(dev);
5127
5128 /* Notifier chain MUST detach us from master device. */
5129 WARN_ON(dev->master);
5130
5131 /* Remove entries from kobject tree */
5132 netdev_unregister_kobject(dev);
5133 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005134
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005135 /* Process any work delayed until the end of the batch */
stephen hemmingere5e26d72010-02-24 14:01:38 +00005136 dev = list_first_entry(head, struct net_device, unreg_list);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005137 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5138
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005139 synchronize_net();
5140
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005141 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005142 dev_put(dev);
5143}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005144
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005145static void rollback_registered(struct net_device *dev)
5146{
5147 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005148
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005149 list_add(&dev->unreg_list, &single);
5150 rollback_registered_many(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005151}
5152
David S. Millere8a04642008-07-17 00:34:19 -07005153static void __netdev_init_queue_locks_one(struct net_device *dev,
5154 struct netdev_queue *dev_queue,
5155 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07005156{
5157 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005158 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07005159 dev_queue->xmit_lock_owner = -1;
5160}
5161
5162static void netdev_init_queue_locks(struct net_device *dev)
5163{
David S. Millere8a04642008-07-17 00:34:19 -07005164 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
5165 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07005166}
5167
Herbert Xub63365a2008-10-23 01:11:29 -07005168unsigned long netdev_fix_features(unsigned long features, const char *name)
5169{
5170 /* Fix illegal SG+CSUM combinations. */
5171 if ((features & NETIF_F_SG) &&
5172 !(features & NETIF_F_ALL_CSUM)) {
5173 if (name)
5174 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
5175 "checksum feature.\n", name);
5176 features &= ~NETIF_F_SG;
5177 }
5178
5179 /* TSO requires that SG is present as well. */
5180 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
5181 if (name)
5182 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
5183 "SG feature.\n", name);
5184 features &= ~NETIF_F_TSO;
5185 }
5186
5187 if (features & NETIF_F_UFO) {
5188 if (!(features & NETIF_F_GEN_CSUM)) {
5189 if (name)
5190 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
5191 "since no NETIF_F_HW_CSUM feature.\n",
5192 name);
5193 features &= ~NETIF_F_UFO;
5194 }
5195
5196 if (!(features & NETIF_F_SG)) {
5197 if (name)
5198 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
5199 "since no NETIF_F_SG feature.\n", name);
5200 features &= ~NETIF_F_UFO;
5201 }
5202 }
5203
5204 return features;
5205}
5206EXPORT_SYMBOL(netdev_fix_features);
5207
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005209 * netif_stacked_transfer_operstate - transfer operstate
5210 * @rootdev: the root or lower level device to transfer state from
5211 * @dev: the device to transfer operstate to
5212 *
5213 * Transfer operational state from root to device. This is normally
5214 * called when a stacking relationship exists between the root
5215 * device and the device (a leaf device).
5216 */
5217void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5218 struct net_device *dev)
5219{
5220 if (rootdev->operstate == IF_OPER_DORMANT)
5221 netif_dormant_on(dev);
5222 else
5223 netif_dormant_off(dev);
5224
5225 if (netif_carrier_ok(rootdev)) {
5226 if (!netif_carrier_ok(dev))
5227 netif_carrier_on(dev);
5228 } else {
5229 if (netif_carrier_ok(dev))
5230 netif_carrier_off(dev);
5231 }
5232}
5233EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5234
5235/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005236 * register_netdevice - register a network device
5237 * @dev: device to register
5238 *
5239 * Take a completed network device structure and add it to the kernel
5240 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5241 * chain. 0 is returned on success. A negative errno code is returned
5242 * on a failure to set up the device, or if the name is a duplicate.
5243 *
5244 * Callers must hold the rtnl semaphore. You may want
5245 * register_netdev() instead of this.
5246 *
5247 * BUGS:
5248 * The locking appears insufficient to guarantee two parallel registers
5249 * will not get the same name.
5250 */
5251
5252int register_netdevice(struct net_device *dev)
5253{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005254 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005255 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256
5257 BUG_ON(dev_boot_phase);
5258 ASSERT_RTNL();
5259
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005260 might_sleep();
5261
Linus Torvalds1da177e2005-04-16 15:20:36 -07005262 /* When net_device's are persistent, this will be fatal. */
5263 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005264 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005265
David S. Millerf1f28aa2008-07-15 00:08:33 -07005266 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005267 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07005268 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005269
Linus Torvalds1da177e2005-04-16 15:20:36 -07005270 dev->iflink = -1;
5271
Tom Herbert0a9627f2010-03-16 08:03:29 +00005272 if (!dev->num_rx_queues) {
5273 /*
5274 * Allocate a single RX queue if driver never called
5275 * alloc_netdev_mq
5276 */
5277
5278 dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
5279 if (!dev->_rx) {
5280 ret = -ENOMEM;
5281 goto out;
5282 }
5283
5284 dev->_rx->first = dev->_rx;
5285 atomic_set(&dev->_rx->count, 1);
5286 dev->num_rx_queues = 1;
5287 }
5288
Linus Torvalds1da177e2005-04-16 15:20:36 -07005289 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005290 if (dev->netdev_ops->ndo_init) {
5291 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005292 if (ret) {
5293 if (ret > 0)
5294 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005295 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005296 }
5297 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005298
Octavian Purdilad9031022009-11-18 02:36:59 +00005299 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
5300 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005301 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302
Eric W. Biederman881d9662007-09-17 11:56:21 -07005303 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304 if (dev->iflink == -1)
5305 dev->iflink = dev->ifindex;
5306
Stephen Hemmingerd212f872007-06-27 00:47:37 -07005307 /* Fix illegal checksum combinations */
5308 if ((dev->features & NETIF_F_HW_CSUM) &&
5309 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5310 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5311 dev->name);
5312 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5313 }
5314
5315 if ((dev->features & NETIF_F_NO_CSUM) &&
5316 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5317 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5318 dev->name);
5319 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5320 }
5321
Herbert Xub63365a2008-10-23 01:11:29 -07005322 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005323
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07005324 /* Enable software GSO if SG is supported. */
5325 if (dev->features & NETIF_F_SG)
5326 dev->features |= NETIF_F_GSO;
5327
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005328 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005329
5330 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5331 ret = notifier_to_errno(ret);
5332 if (ret)
5333 goto err_uninit;
5334
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005335 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005336 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005337 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005338 dev->reg_state = NETREG_REGISTERED;
5339
Linus Torvalds1da177e2005-04-16 15:20:36 -07005340 /*
5341 * Default initial state at registry is that the
5342 * device is present.
5343 */
5344
5345 set_bit(__LINK_STATE_PRESENT, &dev->state);
5346
Linus Torvalds1da177e2005-04-16 15:20:36 -07005347 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005348 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005349 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005350
5351 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005352 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005353 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005354 if (ret) {
5355 rollback_registered(dev);
5356 dev->reg_state = NETREG_UNREGISTERED;
5357 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005358 /*
5359 * Prevent userspace races by waiting until the network
5360 * device is fully setup before sending notifications.
5361 */
Patrick McHardya2835762010-02-26 06:34:51 +00005362 if (!dev->rtnl_link_ops ||
5363 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5364 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005365
5366out:
5367 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005368
5369err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005370 if (dev->netdev_ops->ndo_uninit)
5371 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005372 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005373}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005374EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005375
5376/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005377 * init_dummy_netdev - init a dummy network device for NAPI
5378 * @dev: device to init
5379 *
5380 * This takes a network device structure and initializes the minimum
5381 * number of fields so it can be used to schedule NAPI polls without
5382 * registering a full blown interface. This is to be used by drivers
5383 * that need to tie several hardware interfaces to a single NAPI
5384 * poll scheduler due to HW limitations.
5385 */
5386int init_dummy_netdev(struct net_device *dev)
5387{
5388 /* Clear everything. Note we don't initialize spinlocks
5389	 * as they aren't supposed to be taken by any of the
5390 * NAPI code and this dummy netdev is supposed to be
5391 * only ever used for NAPI polls
5392 */
5393 memset(dev, 0, sizeof(struct net_device));
5394
5395 /* make sure we BUG if trying to hit standard
5396 * register/unregister code path
5397 */
5398 dev->reg_state = NETREG_DUMMY;
5399
5400 /* initialize the ref count */
5401 atomic_set(&dev->refcnt, 1);
5402
5403 /* NAPI wants this */
5404 INIT_LIST_HEAD(&dev->napi_list);
5405
5406 /* a dummy interface is started by default */
5407 set_bit(__LINK_STATE_PRESENT, &dev->state);
5408 set_bit(__LINK_STATE_START, &dev->state);
5409
5410 return 0;
5411}
5412EXPORT_SYMBOL_GPL(init_dummy_netdev);
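
/*
 * Example (sketch; "priv", its fields and "my_poll" are placeholders):
 * a driver with several hardware channels behind one interface can
 * host its NAPI contexts on an embedded dummy netdev:
 *
 *	init_dummy_netdev(&priv->napi_dev);
 *	netif_napi_add(&priv->napi_dev, &priv->napi, my_poll, 64);
 *	napi_enable(&priv->napi);
 */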
5413
5414
5415/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416 * register_netdev - register a network device
5417 * @dev: device to register
5418 *
5419 * Take a completed network device structure and add it to the kernel
5420 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5421 * chain. 0 is returned on success. A negative errno code is returned
5422 * on a failure to set up the device, or if the name is a duplicate.
5423 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005424 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425 * and expands the device name if you passed a format string to
5426 * alloc_netdev.
5427 */
5428int register_netdev(struct net_device *dev)
5429{
5430 int err;
5431
5432 rtnl_lock();
5433
5434 /*
5435 * If the name is a format string the caller wants us to do a
5436 * name allocation.
5437 */
5438 if (strchr(dev->name, '%')) {
5439 err = dev_alloc_name(dev, dev->name);
5440 if (err < 0)
5441 goto out;
5442 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005443
Linus Torvalds1da177e2005-04-16 15:20:36 -07005444 err = register_netdevice(dev);
5445out:
5446 rtnl_unlock();
5447 return err;
5448}
5449EXPORT_SYMBOL(register_netdev);
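
/*
 * Example (sketch; "struct my_priv" and "my_setup" are placeholders):
 * a typical probe path that relies on the '%d' format string, letting
 * register_netdev() pick the first free name:
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "mydev%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */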
5450
5451/*
5452 * netdev_wait_allrefs - wait until all references are gone.
5453 *
5454 * This is called when unregistering network devices.
5455 *
5456 * Any protocol or device that holds a reference should register
5457 * for netdevice notification, and cleanup and put back the
5458 * reference if they receive an UNREGISTER event.
5459 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005460 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005461 */
5462static void netdev_wait_allrefs(struct net_device *dev)
5463{
5464 unsigned long rebroadcast_time, warning_time;
5465
Eric Dumazete014deb2009-11-17 05:59:21 +00005466 linkwatch_forget_dev(dev);
5467
Linus Torvalds1da177e2005-04-16 15:20:36 -07005468 rebroadcast_time = warning_time = jiffies;
5469 while (atomic_read(&dev->refcnt) != 0) {
5470 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005471 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005472
5473 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005474 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005475 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
Octavian Purdila395264d2009-11-16 13:49:35 +00005476			 * should have already handled it the first time */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005477
5478 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5479 &dev->state)) {
5480 /* We must not have linkwatch events
5481 * pending on unregister. If this
5482 * happens, we simply run the queue
5483 * unscheduled, resulting in a noop
5484 * for this device.
5485 */
5486 linkwatch_run_queue();
5487 }
5488
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005489 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005490
5491 rebroadcast_time = jiffies;
5492 }
5493
5494 msleep(250);
5495
5496 if (time_after(jiffies, warning_time + 10 * HZ)) {
5497 printk(KERN_EMERG "unregister_netdevice: "
5498 "waiting for %s to become free. Usage "
5499 "count = %d\n",
5500 dev->name, atomic_read(&dev->refcnt));
5501 warning_time = jiffies;
5502 }
5503 }
5504}
5505
5506/* The sequence is:
5507 *
5508 * rtnl_lock();
5509 * ...
5510 * register_netdevice(x1);
5511 * register_netdevice(x2);
5512 * ...
5513 * unregister_netdevice(y1);
5514 * unregister_netdevice(y2);
5515 * ...
5516 * rtnl_unlock();
5517 * free_netdev(y1);
5518 * free_netdev(y2);
5519 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005520 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005521 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005522 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005523 * without deadlocking with linkwatch via keventd.
5524 * 2) Since we run with the RTNL semaphore not held, we can sleep
5525 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005526 *
5527 * We must not return until all unregister events added during
5528 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005529 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005530void netdev_run_todo(void)
5531{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005532 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005533
Linus Torvalds1da177e2005-04-16 15:20:36 -07005534 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005535 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005536
5537 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005538
Linus Torvalds1da177e2005-04-16 15:20:36 -07005539 while (!list_empty(&list)) {
5540 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00005541 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005542 list_del(&dev->todo_list);
5543
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005544 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005545 printk(KERN_ERR "network todo '%s' but state %d\n",
5546 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005547 dump_stack();
5548 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005550
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005551 dev->reg_state = NETREG_UNREGISTERED;
5552
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005553 on_each_cpu(flush_backlog, dev, 1);
5554
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005555 netdev_wait_allrefs(dev);
5556
5557 /* paranoia */
5558 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005559 WARN_ON(dev->ip_ptr);
5560 WARN_ON(dev->ip6_ptr);
5561 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005562
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005563 if (dev->destructor)
5564 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005565
5566 /* Free network device */
5567 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005568 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005569}
5570
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005571/**
Eric Dumazetd83345a2009-11-16 03:36:51 +00005572 * dev_txq_stats_fold - fold tx_queues stats
5573 * @dev: device to get statistics from
5574 * @stats: struct net_device_stats to hold results
5575 */
5576void dev_txq_stats_fold(const struct net_device *dev,
5577 struct net_device_stats *stats)
5578{
5579 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5580 unsigned int i;
5581 struct netdev_queue *txq;
5582
5583 for (i = 0; i < dev->num_tx_queues; i++) {
5584 txq = netdev_get_tx_queue(dev, i);
5585 tx_bytes += txq->tx_bytes;
5586 tx_packets += txq->tx_packets;
5587 tx_dropped += txq->tx_dropped;
5588 }
5589 if (tx_bytes || tx_packets || tx_dropped) {
5590 stats->tx_bytes = tx_bytes;
5591 stats->tx_packets = tx_packets;
5592 stats->tx_dropped = tx_dropped;
5593 }
5594}
5595EXPORT_SYMBOL(dev_txq_stats_fold);
5596
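/*
 * Hedged usage sketch (not from this file): a multiqueue driver that
 * counts into the per-queue tx_bytes/tx_packets/tx_dropped fields can
 * fold them into its net_device_stats this way.  my_ndo_get_stats is a
 * hypothetical ndo_get_stats implementation.
 */
static struct net_device_stats *my_ndo_get_stats(struct net_device *dev)
{
	dev_txq_stats_fold(dev, &dev->stats);	/* sum all tx queues */
	return &dev->stats;
}
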
5597/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005598 * dev_get_stats - get network device statistics
5599 * @dev: device to get statistics from
5600 *
5601 * Get network statistics from device. The device driver may provide
5602 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5603 * the internal statistics structure is used.
5604 */
5605const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005606{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005607 const struct net_device_ops *ops = dev->netdev_ops;
5608
5609 if (ops->ndo_get_stats)
5610 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005611
Eric Dumazetd83345a2009-11-16 03:36:51 +00005612 dev_txq_stats_fold(dev, &dev->stats);
5613 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07005614}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005615EXPORT_SYMBOL(dev_get_stats);
Rusty Russellc45d2862007-03-28 14:29:08 -07005616
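/*
 * Read-side sketch, as rtnetlink-style code might consume it; the
 * my_dump_stats name and the message text are illustrative only.
 */
static void my_dump_stats(struct net_device *dev)
{
	const struct net_device_stats *stats = dev_get_stats(dev);

	printk(KERN_DEBUG "%s: rx %lu / tx %lu packets\n",
	       dev->name, stats->rx_packets, stats->tx_packets);
}
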
David S. Millerdc2b4842008-07-08 17:18:23 -07005617static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005618 struct netdev_queue *queue,
5619 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005620{
David S. Millerdc2b4842008-07-08 17:18:23 -07005621 queue->dev = dev;
5622}
5623
David S. Millerbb949fb2008-07-08 16:55:56 -07005624static void netdev_init_queues(struct net_device *dev)
5625{
David S. Millere8a04642008-07-17 00:34:19 -07005626 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5627 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005628 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005629}
5630
Linus Torvalds1da177e2005-04-16 15:20:36 -07005631/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005632 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005633 * @sizeof_priv: size of private data to allocate space for
5634 * @name: device name format string
5635 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005636 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005637 *
5638 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005639 * and performs basic initialization. Also allocates subqueue structs
5640 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005641 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005642struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5643 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005644{
David S. Millere8a04642008-07-17 00:34:19 -07005645 struct netdev_queue *tx;
Tom Herbert0a9627f2010-03-16 08:03:29 +00005646 struct netdev_rx_queue *rx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005647 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005648 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005649 struct net_device *p;
Tom Herbert0a9627f2010-03-16 08:03:29 +00005650 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005651
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005652 BUG_ON(strlen(name) >= sizeof(dev->name));
5653
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005654 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005655 if (sizeof_priv) {
5656 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005657 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005658 alloc_size += sizeof_priv;
5659 }
5660 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005661 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005662
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005663 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005664 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005665 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005666 return NULL;
5667 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005668
Stephen Hemminger79439862008-07-21 13:28:44 -07005669 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005670 if (!tx) {
5671 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5672 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005673 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005674 }
5675
Tom Herbert0a9627f2010-03-16 08:03:29 +00005676 rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5677 if (!rx) {
5678 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5679 "rx queues.\n");
5680 goto free_tx;
5681 }
5682
5683 atomic_set(&rx->count, queue_count);
5684
5685 /*
5686 * Point each queue at the first element in the array, which
5687 * holds the reference count.
5688 */
5689 for (i = 0; i < queue_count; i++)
5690 rx[i].first = rx;
5691
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005692 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005693 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005694
5695 if (dev_addr_init(dev))
Tom Herbert0a9627f2010-03-16 08:03:29 +00005696 goto free_rx;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005697
Jiri Pirkoccffad252009-05-22 23:22:17 +00005698 dev_unicast_init(dev);
5699
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005700 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005701
David S. Millere8a04642008-07-17 00:34:19 -07005702 dev->_tx = tx;
5703 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005704 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005705
Tom Herbert0a9627f2010-03-16 08:03:29 +00005706 dev->_rx = rx;
5707 dev->num_rx_queues = queue_count;
5708
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005709 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005710
David S. Millerbb949fb2008-07-08 16:55:56 -07005711 netdev_init_queues(dev);
5712
Peter P Waskiewicz Jr15682bc2010-02-10 20:03:05 -08005713 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5714 dev->ethtool_ntuple_list.count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08005715 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005716 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00005717 INIT_LIST_HEAD(&dev->link_watch_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005718 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005719 setup(dev);
5720 strcpy(dev->name, name);
5721 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005722
Tom Herbert0a9627f2010-03-16 08:03:29 +00005723free_rx:
5724 kfree(rx);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005725free_tx:
5726 kfree(tx);
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005727free_p:
5728 kfree(p);
5729 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005730}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005731EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005732
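/*
 * Illustrative driver-side usage, assuming a fictional my_priv struct
 * and "myeth%d" name template: allocate an Ethernet device with four
 * queues, register it, and free it on failure while it is still in the
 * NETREG_UNINITIALIZED state that free_netdev() below special-cases.
 */
struct my_priv {
	spinlock_t lock;		/* hypothetical driver state */
};

static struct net_device *my_create_dev(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d",
			      ether_setup, 4);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {
		free_netdev(dev);	/* never registered, free directly */
		return NULL;
	}
	return dev;
}
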
5733/**
5734 * free_netdev - free network device
5735 * @dev: device
5736 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005737 * This function does the last stage of destroying an allocated device
5738 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005739 * If this is the last reference then it will be freed.
5740 */
5741void free_netdev(struct net_device *dev)
5742{
Herbert Xud565b0a2008-12-15 23:38:52 -08005743 struct napi_struct *p, *n;
5744
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005745 release_net(dev_net(dev));
5746
David S. Millere8a04642008-07-17 00:34:19 -07005747 kfree(dev->_tx);
5748
Jiri Pirkof001fde2009-05-05 02:48:28 +00005749 /* Flush device addresses */
5750 dev_addr_flush(dev);
5751
Peter P Waskiewicz Jr15682bc2010-02-10 20:03:05 -08005752 /* Clear ethtool n-tuple list */
5753 ethtool_ntuple_flush(dev);
5754
Herbert Xud565b0a2008-12-15 23:38:52 -08005755 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5756 netif_napi_del(p);
5757
Stephen Hemminger3041a062006-05-26 13:25:24 -07005758 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005759 if (dev->reg_state == NETREG_UNINITIALIZED) {
5760 kfree((char *)dev - dev->padded);
5761 return;
5762 }
5763
5764 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5765 dev->reg_state = NETREG_RELEASED;
5766
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005767 /* will free via device release */
5768 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005769}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005770EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005771
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005772/**
5773 * synchronize_net - Synchronize with packet receive processing
5774 *
5775 * Wait for packets currently being received to be done.
5776 * Does not block later packets from starting.
5777 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005778void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005779{
5780 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005781 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005782}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005783EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005784
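/*
 * Typical pattern, sketched with hypothetical my_* names: unhook an
 * RCU-protected receive path, let in-flight receivers drain, then free.
 */
static struct packet_type my_packet_type;	/* set up elsewhere */
static void *my_state;				/* per-handler state */

static void my_teardown(void)
{
	__dev_remove_pack(&my_packet_type);	/* stop new receivers */
	synchronize_net();			/* wait out in-flight ones */
	kfree(my_state);			/* now safe to free */
}
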
5785/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005786 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005787 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005788 * @head: list to queue the device on, or NULL to unregister it now
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08005789 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005790 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005791 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005792 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005793 *
5794 * Callers must hold the rtnl semaphore. You may want
5795 * unregister_netdev() instead of this.
5796 */
5797
Eric Dumazet44a08732009-10-27 07:03:04 +00005798void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005799{
Herbert Xua6620712007-12-12 19:21:56 -08005800 ASSERT_RTNL();
5801
Eric Dumazet44a08732009-10-27 07:03:04 +00005802 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005803 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005804 } else {
5805 rollback_registered(dev);
5806 /* Finish processing unregister after unlock */
5807 net_set_todo(dev);
5808 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005809}
Eric Dumazet44a08732009-10-27 07:03:04 +00005810EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005811
5812/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005813 * unregister_netdevice_many - unregister many devices
5814 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005815 */
5816void unregister_netdevice_many(struct list_head *head)
5817{
5818 struct net_device *dev;
5819
5820 if (!list_empty(head)) {
5821 rollback_registered_many(head);
5822 list_for_each_entry(dev, head, unreg_list)
5823 net_set_todo(dev);
5824 }
5825}
Eric Dumazet63c80992009-10-27 07:06:49 +00005826EXPORT_SYMBOL(unregister_netdevice_many);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005827
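/*
 * Batched-teardown sketch under RTNL, combining the two helpers above;
 * my_unregister_all and the device array are hypothetical.
 */
static void my_unregister_all(struct net_device **devs, int n)
{
	LIST_HEAD(kill_list);
	int i;

	ASSERT_RTNL();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);	/* one rollback pass for all */
}
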
5828/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005829 * unregister_netdev - remove device from the kernel
5830 * @dev: device
5831 *
5832 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005833 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005834 *
5835 * This is just a wrapper for unregister_netdevice that takes
5836 * the rtnl semaphore. In general you want to use this and not
5837 * unregister_netdevice.
5838 */
5839void unregister_netdev(struct net_device *dev)
5840{
5841 rtnl_lock();
5842 unregister_netdevice(dev);
5843 rtnl_unlock();
5844}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005845EXPORT_SYMBOL(unregister_netdev);
5846
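/*
 * The module-teardown shape this wrapper exists for (names are
 * hypothetical); note the caller takes no lock itself.
 */
static struct net_device *my_dev;

static void __exit my_module_exit(void)
{
	unregister_netdev(my_dev);	/* takes and drops the rtnl semaphore */
	free_netdev(my_dev);
}
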
Eric W. Biedermance286d32007-09-12 13:53:49 +02005847/**
5848 * dev_change_net_namespace - move device to different nethost namespace
5849 * @dev: device
5850 * @net: network namespace
5851 * @pat: If not NULL name pattern to try if the current device name
5852 * is already taken in the destination network namespace.
5853 *
5854 * This function shuts down a device interface and moves it
5855 * to a new network namespace. On success 0 is returned, on
5856 * a failure a negative errno code is returned.
5857 *
5858 * Callers must hold the rtnl semaphore.
5859 */
5860
5861int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5862{
Eric W. Biedermance286d32007-09-12 13:53:49 +02005863 int err;
5864
5865 ASSERT_RTNL();
5866
5867 /* Don't allow namespace local devices to be moved. */
5868 err = -EINVAL;
5869 if (dev->features & NETIF_F_NETNS_LOCAL)
5870 goto out;
5871
Eric W. Biederman38918452008-10-27 17:51:47 -07005872#ifdef CONFIG_SYSFS
5873 /* Don't allow real devices to be moved when sysfs
5874 * is enabled.
5875 */
5876 err = -EINVAL;
5877 if (dev->dev.parent)
5878 goto out;
5879#endif
5880
Eric W. Biedermance286d32007-09-12 13:53:49 +02005881 /* Ensure the device has been registered */
5882 err = -EINVAL;
5883 if (dev->reg_state != NETREG_REGISTERED)
5884 goto out;
5885
5886 /* Get out if there is nothing to do */
5887 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005888 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005889 goto out;
5890
5891 /* Pick the destination device name, and ensure
5892 * we can use it in the destination network namespace.
5893 */
5894 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00005895 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005896 /* We get here if we can't use the current device name */
5897 if (!pat)
5898 goto out;
Octavian Purdilad9031022009-11-18 02:36:59 +00005899 if (dev_get_valid_name(net, pat, dev->name, 1))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005900 goto out;
5901 }
5902
5903 /*
5904 * And now a mini version of register_netdevice and unregister_netdevice.
5905 */
5906
5907 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005908 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005909
5910 /* And unlink it from device chain */
5911 err = -ENODEV;
5912 unlist_netdevice(dev);
5913
5914 synchronize_net();
5915
5916 /* Shutdown queueing discipline. */
5917 dev_shutdown(dev);
5918
5919 /* Notify protocols that we are about to destroy
5920 this device. They should clean up all of their state.
5921 */
5922 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005923 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005924
5925 /*
5926 * Flush the unicast and multicast chains
5927 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00005928 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005929 dev_addr_discard(dev);
5930
Eric W. Biederman38918452008-10-27 17:51:47 -07005931 netdev_unregister_kobject(dev);
5932
Eric W. Biedermance286d32007-09-12 13:53:49 +02005933 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005934 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005935
Eric W. Biedermance286d32007-09-12 13:53:49 +02005936 /* If there is an ifindex conflict assign a new one */
5937 if (__dev_get_by_index(net, dev->ifindex)) {
5938 int iflink = (dev->iflink == dev->ifindex);
5939 dev->ifindex = dev_new_index(net);
5940 if (iflink)
5941 dev->iflink = dev->ifindex;
5942 }
5943
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005944 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005945 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005946 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005947
5948 /* Add the device back in the hashes */
5949 list_netdevice(dev);
5950
5951 /* Notify protocols that a new device appeared. */
5952 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5953
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005954 /*
5955 * Prevent userspace races by waiting until the network
5956 * device is fully set up before sending notifications.
5957 */
5958 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5959
Eric W. Biedermance286d32007-09-12 13:53:49 +02005960 synchronize_net();
5961 err = 0;
5962out:
5963 return err;
5964}
Johannes Berg463d0182009-07-14 00:33:35 +02005965EXPORT_SYMBOL_GPL(dev_change_net_namespace);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005966
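/*
 * Caller-side sketch (rtnetlink-style, hypothetical my_move_dev name):
 * move a device into another namespace under RTNL, falling back to a
 * "dev%d" pattern if the current name is taken there.
 */
static int my_move_dev(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "dev%d");
	rtnl_unlock();
	return err;
}
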
Linus Torvalds1da177e2005-04-16 15:20:36 -07005967static int dev_cpu_callback(struct notifier_block *nfb,
5968 unsigned long action,
5969 void *ocpu)
5970{
5971 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005972 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005973 struct sk_buff *skb;
5974 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5975 struct softnet_data *sd, *oldsd;
5976
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005977 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005978 return NOTIFY_OK;
5979
5980 local_irq_disable();
5981 cpu = smp_processor_id();
5982 sd = &per_cpu(softnet_data, cpu);
5983 oldsd = &per_cpu(softnet_data, oldcpu);
5984
5985 /* Find end of our completion_queue. */
5986 list_skb = &sd->completion_queue;
5987 while (*list_skb)
5988 list_skb = &(*list_skb)->next;
5989 /* Append completion queue from offline CPU. */
5990 *list_skb = oldsd->completion_queue;
5991 oldsd->completion_queue = NULL;
5992
5993 /* Find end of our output_queue. */
5994 list_net = &sd->output_queue;
5995 while (*list_net)
5996 list_net = &(*list_net)->next_sched;
5997 /* Append output queue from offline CPU. */
5998 *list_net = oldsd->output_queue;
5999 oldsd->output_queue = NULL;
6000
6001 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6002 local_irq_enable();
6003
6004 /* Process offline CPU's input_pkt_queue */
6005 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
6006 netif_rx(skb);
6007
6008 return NOTIFY_OK;
6009}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006010
6011
Herbert Xu7f353bf2007-08-10 15:47:58 -07006012/**
Herbert Xub63365a2008-10-23 01:11:29 -07006013 * netdev_increment_features - increment feature set by one
6014 * @all: current feature set
6015 * @one: new feature set
6016 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07006017 *
6018 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07006019 * @one to the master device with current feature set @all. Will not
6020 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07006021 */
Herbert Xub63365a2008-10-23 01:11:29 -07006022unsigned long netdev_increment_features(unsigned long all, unsigned long one,
6023 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07006024{
Herbert Xub63365a2008-10-23 01:11:29 -07006025 /* If device needs checksumming, downgrade to it. */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07006026 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
Herbert Xub63365a2008-10-23 01:11:29 -07006027 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
6028 else if (mask & NETIF_F_ALL_CSUM) {
6029 /* If one device supports v4/v6 checksumming, set for all. */
6030 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
6031 !(all & NETIF_F_GEN_CSUM)) {
6032 all &= ~NETIF_F_ALL_CSUM;
6033 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
6034 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07006035
Herbert Xub63365a2008-10-23 01:11:29 -07006036 /* If one device supports hw checksumming, set for all. */
6037 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
6038 all &= ~NETIF_F_ALL_CSUM;
6039 all |= NETIF_F_HW_CSUM;
6040 }
6041 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07006042
Herbert Xub63365a2008-10-23 01:11:29 -07006043 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07006044
Herbert Xub63365a2008-10-23 01:11:29 -07006045 one |= all & NETIF_F_ONE_FOR_ALL;
Sridhar Samudralad9f59502009-10-07 12:24:25 +00006046 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
Herbert Xub63365a2008-10-23 01:11:29 -07006047 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07006048
6049 return all;
6050}
Herbert Xub63365a2008-10-23 01:11:29 -07006051EXPORT_SYMBOL(netdev_increment_features);
Herbert Xu7f353bf2007-08-10 15:47:58 -07006052
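/*
 * Fold pattern as a bonding-style master might apply it (sketch; the
 * my_compute_features name is hypothetical): clear the one-for-all
 * bits, then fold in each slave's feature set.
 */
static unsigned long my_compute_features(struct net_device *master,
					 struct net_device **slaves, int n)
{
	unsigned long features = master->features & ~NETIF_F_ONE_FOR_ALL;
	int i;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features,
						     NETIF_F_ONE_FOR_ALL);
	return features;
}
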
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006053static struct hlist_head *netdev_create_hash(void)
6054{
6055 int i;
6056 struct hlist_head *hash;
6057
6058 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6059 if (hash != NULL)
6060 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6061 INIT_HLIST_HEAD(&hash[i]);
6062
6063 return hash;
6064}
6065
Eric W. Biederman881d9662007-09-17 11:56:21 -07006066/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07006067static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006068{
Eric W. Biederman881d9662007-09-17 11:56:21 -07006069 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07006070
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006071 net->dev_name_head = netdev_create_hash();
6072 if (net->dev_name_head == NULL)
6073 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006074
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006075 net->dev_index_head = netdev_create_hash();
6076 if (net->dev_index_head == NULL)
6077 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006078
6079 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07006080
6081err_idx:
6082 kfree(net->dev_name_head);
6083err_name:
6084 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07006085}
6086
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07006087/**
6088 * netdev_drivername - network driver for the device
6089 * @dev: network device
6090 * @buffer: buffer for resulting name
6091 * @len: size of buffer
6092 *
6093 * Determine the network driver name for @dev and copy it into @buffer.
6094 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006095char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07006096{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07006097 const struct device_driver *driver;
6098 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07006099
6100 if (len <= 0 || !buffer)
6101 return buffer;
6102 buffer[0] = 0;
6103
6104 parent = dev->dev.parent;
6105
6106 if (!parent)
6107 return buffer;
6108
6109 driver = parent->driver;
6110 if (driver && driver->name)
6111 strlcpy(buffer, driver->name, len);
6112 return buffer;
6113}
6114
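/*
 * Usage sketch in watchdog-style diagnostics (my_report and the message
 * text are illustrative):
 */
static void my_report(const struct net_device *dev)
{
	char drivername[64];

	printk(KERN_INFO "%s (%s): example diagnostic\n", dev->name,
	       netdev_drivername(dev, drivername, sizeof(drivername)));
}
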
Pavel Emelyanov46650792007-10-08 20:38:39 -07006115static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07006116{
6117 kfree(net->dev_name_head);
6118 kfree(net->dev_index_head);
6119}
6120
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006121static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07006122 .init = netdev_init,
6123 .exit = netdev_exit,
6124};
6125
Pavel Emelyanov46650792007-10-08 20:38:39 -07006126static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02006127{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006128 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02006129 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006130 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02006131 * initial network namespace
6132 */
6133 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006134 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006135 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006136 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02006137
6138 /* Ignore unmovable devices (e.g. loopback) */
6139 if (dev->features & NETIF_F_NETNS_LOCAL)
6140 continue;
6141
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00006142 /* Leave virtual devices for the generic cleanup */
6143 if (dev->rtnl_link_ops)
6144 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08006145
Eric W. Biedermance286d32007-09-12 13:53:49 +02006146 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006147 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6148 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02006149 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006150 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02006151 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07006152 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02006153 }
6154 }
6155 rtnl_unlock();
6156}
6157
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006158static void __net_exit default_device_exit_batch(struct list_head *net_list)
6159{
6160 /* At exit all network devices must be removed from a network
6161 * namespace. Do this in the reverse order of registration.
6162 * Do this across as many network namespaces as possible to
6163 * improve batching efficiency.
6164 */
6165 struct net_device *dev;
6166 struct net *net;
6167 LIST_HEAD(dev_kill_list);
6168
6169 rtnl_lock();
6170 list_for_each_entry(net, net_list, exit_list) {
6171 for_each_netdev_reverse(net, dev) {
6172 if (dev->rtnl_link_ops)
6173 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6174 else
6175 unregister_netdevice_queue(dev, &dev_kill_list);
6176 }
6177 }
6178 unregister_netdevice_many(&dev_kill_list);
6179 rtnl_unlock();
6180}
6181
Denis V. Lunev022cbae2007-11-13 03:23:50 -08006182static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02006183 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00006184 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02006185};
6186
Linus Torvalds1da177e2005-04-16 15:20:36 -07006187/*
6188 * Initialize the DEV module. At boot time this walks the device list and
6189 * unhooks any devices that fail to initialise (normally hardware not
6190 * present) and leaves us with a valid list of present and active devices.
6191 *
6192 */
6193
6194/*
6195 * This is called single-threaded during boot, so no need
6196 * to take the rtnl semaphore.
6197 */
6198static int __init net_dev_init(void)
6199{
6200 int i, rc = -ENOMEM;
6201
6202 BUG_ON(!dev_boot_phase);
6203
Linus Torvalds1da177e2005-04-16 15:20:36 -07006204 if (dev_proc_init())
6205 goto out;
6206
Eric W. Biederman8b41d182007-09-26 22:02:53 -07006207 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07006208 goto out;
6209
6210 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08006211 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006212 INIT_LIST_HEAD(&ptype_base[i]);
6213
Eric W. Biederman881d9662007-09-17 11:56:21 -07006214 if (register_pernet_subsys(&netdev_net_ops))
6215 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006216
6217 /*
6218 * Initialise the packet receive queues.
6219 */
6220
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07006221 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006222 struct softnet_data *queue;
6223
6224 queue = &per_cpu(softnet_data, i);
6225 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006226 queue->completion_queue = NULL;
6227 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006228
Tom Herbert1e94d722010-03-18 17:45:44 -07006229#ifdef CONFIG_SMP
Tom Herbert0a9627f2010-03-16 08:03:29 +00006230 queue->csd.func = trigger_softirq;
6231 queue->csd.info = queue;
6232 queue->csd.flags = 0;
Tom Herbert1e94d722010-03-18 17:45:44 -07006233#endif
Tom Herbert0a9627f2010-03-16 08:03:29 +00006234
Stephen Hemmingerbea33482007-10-03 16:41:36 -07006235 queue->backlog.poll = process_backlog;
6236 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08006237 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00006238 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006239 }
6240
Linus Torvalds1da177e2005-04-16 15:20:36 -07006241 dev_boot_phase = 0;
6242
Eric W. Biederman505d4f72008-11-07 22:54:20 -08006243 /* The loopback device is special: if any other network device
6244 * is present in a network namespace, the loopback device must
6245 * be present too. Since we now dynamically allocate and free the
6246 * loopback device, ensure this invariant is maintained by
6247 * keeping the loopback device as the first device on the
6248 * list of network devices, so that the loopback device is the
6249 * first device that appears and the last network device
6250 * that disappears.
6251 */
6252 if (register_pernet_device(&loopback_net_ops))
6253 goto out;
6254
6255 if (register_pernet_device(&default_device_ops))
6256 goto out;
6257
Carlos R. Mafra962cf362008-05-15 11:15:37 -03006258 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6259 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006260
6261 hotcpu_notifier(dev_cpu_callback, 0);
6262 dst_init();
6263 dev_mcast_init();
6264 rc = 0;
6265out:
6266 return rc;
6267}
6268
6269subsys_initcall(net_dev_init);
6270
Krishna Kumare88721f2009-02-18 17:55:02 -08006271static int __init initialize_hashrnd(void)
6272{
Tom Herbert0a9627f2010-03-16 08:03:29 +00006273 get_random_bytes(&hashrnd, sizeof(hashrnd));
Krishna Kumare88721f2009-02-18 17:55:02 -08006274 return 0;
6275}
6276
6277late_initcall_sync(initialize_hashrnd);
6278