/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16. Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 * NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *        sure which should go first, but I bet it won't make much
 *        difference if we are running VLANs.  The good news is that
 *        this protocol won't be in the list unless compiled in, so
 *        the average user (w/out VLANs) will not be adversely affected.
 *        --BLG
 *
 *        0800  IP
 *        8100  802.1Q VLAN
 *        0001  802.3
 *        0002  AX.25
 *        0004  802.2
 *        8035  RARP
 *        0005  SNAP
 *        0805  X.25
 *        0806  ARP
 *        8137  IPX
 *        0009  Localtalk
 *        86DD  IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
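
/*
 * Illustrative note (not in the original source): a protocol's bucket is
 * ntohs(pt->type) & PTYPE_HASH_MASK, i.e. the low nibble of the host-order
 * EtherType. For example, ARP (0x0806) lands in bucket 6 and IP (0x0800)
 * in bucket 0, while RARP (0x8035), SNAP (0x0005) and X.25 (0x0805) all
 * share bucket 5 -- the one overlap the comment above refers to.
 */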

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

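/*
 * Illustrative sketch (not part of the original file): a pure reader
 * following the rules above brackets the lookup with rcu_read_lock()/
 * rcu_read_unlock() and takes a reference before leaving the critical
 * section. Compiled out with #if 0; for exposition only.
 */
#if 0
static struct net_device *example_rcu_reader(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);	/* pin it before the RCU section ends */
	rcu_read_unlock();
	return dev;		/* caller must dev_put() when finished */
}
#endif
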
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 * Add a protocol ID to the list. Now that the input handler is
 * smarter we can dispense with all the messy stuff that used to be
 * here.
 *
 * BEWARE!!! Protocol handlers, mangling input packets,
 * MUST BE last in hash buckets and checking protocol handlers
 * MUST start from promiscuous ptype_all chain in net_bh.
 * It is true now, do not change it.
 * Explanation follows: if a protocol handler that mangles the packet
 * were first on the list, it could not sense that the packet is cloned
 * and should be copied-on-write; it would change it in place and
 * subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

/**
 * dev_add_pack - add packet handler
 * @pt: packet type declaration
 *
 * Add a protocol handler to the networking stack. The passed &packet_type
 * is linked into kernel lists and may not be freed until it has been
 * removed from the kernel lists.
 *
 * This call does not sleep and therefore cannot guarantee that all
 * CPUs that are in the middle of receiving packets will see the new
 * packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

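/*
 * Illustrative sketch (not part of the original file): a minimal client of
 * dev_add_pack()/dev_remove_pack(). The handler signature matches struct
 * packet_type's func in this kernel generation. Compiled out with #if 0;
 * for exposition only.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* We own one reference on the skb; just drop it here. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),	/* hashes to bucket 0 of ptype_base */
	.func = example_rcv,
};

/* dev_add_pack(&example_pt); ... dev_remove_pack(&example_pt); */
#endif
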
/**
 * __dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 * dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/*******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 * netdev_boot_setup_add - add new setup entry
 * @name: name of the device
 * @map: configured settings for the device
 *
 * Adds a new setup entry to the dev_boot_setup list.  The function
 * returns 0 on error and 1 on success.  This is a generic routine for
 * all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 * netdev_boot_setup_check - check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings were found, 1 if they were.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 * netdev_boot_base - get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings were found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If the device is already registered then return a base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

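/*
 * Illustrative note (not in the original source): with the parser above, a
 * kernel command line such as
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * records irq 5 and I/O base 0x340 for the device that later probes as
 * "eth0"; get_options() consumes the leading integers and the leftover
 * string is taken as the device name.
 */
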
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 * __dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. Must be called under the RTNL semaphore
 * or @dev_base_lock. If the name is found a pointer to the device
 * is returned. If the name is not found then %NULL is returned. The
 * reference counters are not incremented so the caller must be
 * careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 * dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. This can be called from any
 * context and does its own locking. The returned handle has
 * the usage count incremented and the caller must use dev_put() to
 * release it when it is no longer needed. %NULL is returned if no
 * matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

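/*
 * Illustrative sketch (not part of the original file): typical use of the
 * reference-counted lookup above. Compiled out with #if 0; for exposition
 * only.
 */
#if 0
static void example_use_by_name(struct net *net)
{
	struct net_device *dev = dev_get_by_name(net, "lo");

	if (!dev)
		return;
	/* ... use dev ... */
	dev_put(dev);	/* drop the reference taken by dev_get_by_name() */
}
#endif
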
/**
 * __dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device has not had its reference
 * counter increased so the caller must be careful about locking. The
 * caller must hold either the RTNL semaphore or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 * dev_get_by_index_rcu - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device has not had its reference
 * counter increased so the caller must be careful about locking. The
 * caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 * dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device returned has had a reference
 * added and the pointer is safe until the user calls dev_put to
 * indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 * dev_getbyhwaddr - find a device by its hardware address
 * @net: the applicable net namespace
 * @type: media type of device
 * @ha: hardware address
 *
 * Search for an interface by MAC address. Returns a pointer to the
 * device, or %NULL if it is not found. The caller must hold the rtnl
 * semaphore. The returned device has not had its ref count increased
 * and the caller must therefore be careful about locking.
 *
 * BUGS:
 * If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 * dev_get_by_flags - find any device with given flags
 * @net: the applicable net namespace
 * @if_flags: IFF_* values
 * @mask: bitmask of bits in if_flags to check
 *
 * Search for any interface with the given flags. Returns a pointer to
 * the first matching device, or %NULL if no device is found. The device
 * returned has had a reference added and the pointer is safe until the
 * user calls dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);

/**
 * dev_valid_name - check if name is okay for network device
 * @name: name string
 *
 * Network device names need to be valid file names to
 * allow sysfs to work.  We also disallow any kind of
 * whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 * __dev_alloc_name - allocate a name for a device
 * @net: network namespace to allocate the device name in
 * @name: name format string
 * @buf:  scratch buffer and result name string
 *
 * Passed a format string - eg "lt%d" - it will try to find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be exactly one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 * dev_alloc_name - allocate a name for a device
 * @dev: device
 * @name: name format string
 *
 * Passed a format string - eg "lt%d" - it will try to find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

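/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically let the core pick the unit number from a format string.
 * Compiled out with #if 0; for exposition only.
 */
#if 0
static int example_name_device(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "eth%d");	/* e.g. yields "eth2" */

	if (unit < 0)
		return unit;	/* -EINVAL or -ENFILE */
	return 0;
}
#endif
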
static int dev_get_valid_name(struct net *net, const char *name, char *buf,
			      bool fmt)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return __dev_alloc_name(net, name, buf);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (buf != name)
		strlcpy(buf, name, IFNAMSIZ);

	return 0;
}

/**
 * dev_change_name - change name of a device
 * @dev: device
 * @newname: name (or format string) must be at least IFNAMSIZ
 *
 * Change the name of a device. A format string such as "eth%d" may be
 * passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, newname, dev->name, 1);
	if (err < 0)
		return err;

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net_eq(net, &init_net)) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from @alias
 *
 * Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 * dev_load - load a network module
 * @net: the applicable net namespace
 * @name: name of interface
 *
 * If a network interface is not present and the process has suitable
 * privileges this function loads the module. If module loading is not
 * available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 * Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 * Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 * Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 * If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 * Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 * Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 * Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 * Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 * ... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
EXPORT_SYMBOL(dev_open);

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 * Tell people we are going down, so that they can
	 * prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on a different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 * Call the device specific close. This cannot fail.
	 * Only if device is UP
	 *
	 * We allow it to be called even after a DETACH hot-plug
	 * event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 * Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 * Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
EXPORT_SYMBOL(dev_close);

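/*
 * Illustrative sketch (not part of the original file): bringing an
 * interface up and then down from kernel code, under the RTNL as both
 * functions require. Compiled out with #if 0; for exposition only.
 */
#if 0
static int example_bounce(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);	/* nop if already IFF_UP */
	if (!err)
		err = dev_close(dev);
	rtnl_unlock();
	return err;
}
#endif
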
/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device.  Must be
 * called under RTNL.  This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


Eric W. Biederman881d9662007-09-17 11:56:21 -07001292static int dev_boot_phase = 1;
1293
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294/*
1295 * Device change register/unregister. These are not inline or static
1296 * as we export them to the world.
1297 */
1298
1299/**
1300 * register_netdevice_notifier - register a network notifier block
1301 * @nb: notifier
1302 *
1303 * Register a notifier to be called when network device events occur.
1304 * The notifier passed is linked into the kernel structures and must
1305 * not be reused until it has been unregistered. A negative errno code
1306 * is returned on a failure.
1307 *
1308 * When registered all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001309 * to the new notifier to allow device to have a race free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 * view of the network device list.
1311 */
1312
1313int register_netdevice_notifier(struct notifier_block *nb)
1314{
1315 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001316 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001317 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 int err;
1319
1320 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001321 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001322 if (err)
1323 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001324 if (dev_boot_phase)
1325 goto unlock;
1326 for_each_net(net) {
1327 for_each_netdev(net, dev) {
1328 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1329 err = notifier_to_errno(err);
1330 if (err)
1331 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332
Eric W. Biederman881d9662007-09-17 11:56:21 -07001333 if (!(dev->flags & IFF_UP))
1334 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001335
Eric W. Biederman881d9662007-09-17 11:56:21 -07001336 nb->notifier_call(nb, NETDEV_UP, dev);
1337 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001339
1340unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 rtnl_unlock();
1342 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001343
1344rollback:
1345 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001346 for_each_net(net) {
1347 for_each_netdev(net, dev) {
1348 if (dev == last)
1349 break;
Herbert Xufcc5a032007-07-30 17:03:38 -07001350
Eric W. Biederman881d9662007-09-17 11:56:21 -07001351 if (dev->flags & IFF_UP) {
1352 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1353 nb->notifier_call(nb, NETDEV_DOWN, dev);
1354 }
1355 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00001356 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001357 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001358 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001359
1360 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001361 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001363EXPORT_SYMBOL(register_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364
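/*
 * Example (illustrative sketch): a minimal notifier block that logs
 * NETDEV_UP/NETDEV_DOWN events.  As the replay loop above shows, in this
 * kernel the notifier receives the struct net_device pointer directly.
 * All "example_*" names are hypothetical.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "example: %s is up\n", dev->name);
	else if (event == NETDEV_DOWN)
		printk(KERN_INFO "example: %s is down\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

static int __init example_notifier_init(void)
{
	/* Registration replays NETDEV_REGISTER/NETDEV_UP for existing devices. */
	return register_netdevice_notifier(&example_netdev_notifier);
}

static void __exit example_notifier_exit(void)
{
	unregister_netdevice_notifier(&example_netdev_notifier);
}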
1365/**
1366 * unregister_netdevice_notifier - unregister a network notifier block
1367 * @nb: notifier
1368 *
1369 * Unregister a notifier previously registered by
	1370 * register_netdevice_notifier(). The notifier is unlinked from the
1371 * kernel structures and may then be reused. A negative errno code
1372 * is returned on a failure.
1373 */
1374
1375int unregister_netdevice_notifier(struct notifier_block *nb)
1376{
Herbert Xu9f514952006-03-25 01:24:25 -08001377 int err;
1378
1379 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001380 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001381 rtnl_unlock();
1382 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001384EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385
1386/**
1387 * call_netdevice_notifiers - call all network notifier blocks
1388 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001389 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 *
1391 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001392 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 */
1394
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001395int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001397 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398}
1399
1400/* When > 0 there are consumers of rx skb time stamps */
1401static atomic_t netstamp_needed = ATOMIC_INIT(0);
1402
1403void net_enable_timestamp(void)
1404{
1405 atomic_inc(&netstamp_needed);
1406}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001407EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
1409void net_disable_timestamp(void)
1410{
1411 atomic_dec(&netstamp_needed);
1412}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001413EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001415static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416{
1417 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001418 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001419 else
1420 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421}
1422
Arnd Bergmann44540962009-11-26 06:07:08 +00001423/**
1424 * dev_forward_skb - loopback an skb to another netif
1425 *
1426 * @dev: destination network device
1427 * @skb: buffer to forward
1428 *
1429 * return values:
1430 * NET_RX_SUCCESS (no congestion)
1431 * NET_RX_DROP (packet was dropped)
1432 *
1433 * dev_forward_skb can be used for injecting an skb from the
1434 * start_xmit function of one device into the receive queue
1435 * of another device.
1436 *
1437 * The receiving device may be in another namespace, so
1438 * we have to clear all information in the skb that could
1439 * impact namespace isolation.
1440 */
1441int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1442{
1443 skb_orphan(skb);
1444
1445 if (!(dev->flags & IFF_UP))
1446 return NET_RX_DROP;
1447
1448 if (skb->len > (dev->mtu + dev->hard_header_len))
1449 return NET_RX_DROP;
1450
1451 skb_dst_drop(skb);
1452 skb->tstamp.tv64 = 0;
1453 skb->pkt_type = PACKET_HOST;
1454 skb->protocol = eth_type_trans(skb, dev);
1455 skb->mark = 0;
1456 secpath_reset(skb);
1457 nf_reset(skb);
1458 return netif_rx(skb);
1459}
1460EXPORT_SYMBOL_GPL(dev_forward_skb);
1461
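/*
 * Example (illustrative sketch): how a paired software device, in the
 * spirit of veth, might use dev_forward_skb() from its ndo_start_xmit.
 * The private structure and peer field are hypothetical; only the call
 * pattern comes from the kernel-doc above, and error handling is
 * reduced to counters.
 */
struct example_pair_priv {
	struct net_device *peer;	/* hypothetical peer device */
};

static netdev_tx_t example_pair_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct example_pair_priv *priv = netdev_priv(dev);

	/* Inject the frame into the peer's receive path. */
	if (dev_forward_skb(priv->peer, skb) == NET_RX_SUCCESS)
		dev->stats.tx_packets++;
	else
		dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}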
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462/*
1463 * Support routine. Sends outgoing frames to any network
1464 * taps currently in use.
1465 */
1466
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001467static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468{
1469 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001470
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001471#ifdef CONFIG_NET_CLS_ACT
1472 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1473 net_timestamp(skb);
1474#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001475 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001476#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477
1478 rcu_read_lock();
1479 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1480 /* Never send packets back to the socket
1481 * they originated from - MvS (miquels@drinkel.ow.org)
1482 */
1483 if ((ptype->dev == dev || !ptype->dev) &&
1484 (ptype->af_packet_priv == NULL ||
1485 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001486 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 if (!skb2)
1488 break;
1489
1490 /* skb->nh should be correctly
1491 set by sender, so that the second statement is
1492 just protection against buggy protocols.
1493 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001494 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001496 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001497 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 if (net_ratelimit())
1499 printk(KERN_CRIT "protocol %04x is "
1500 "buggy, dev %s\n",
1501 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001502 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 }
1504
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001505 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001507 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 }
1509 }
1510 rcu_read_unlock();
1511}
1512
Denis Vlasenko56079432006-03-29 15:57:29 -08001513
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001514static inline void __netif_reschedule(struct Qdisc *q)
1515{
1516 struct softnet_data *sd;
1517 unsigned long flags;
1518
1519 local_irq_save(flags);
1520 sd = &__get_cpu_var(softnet_data);
1521 q->next_sched = sd->output_queue;
1522 sd->output_queue = q;
1523 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1524 local_irq_restore(flags);
1525}
1526
David S. Miller37437bb2008-07-16 02:15:04 -07001527void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001528{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001529 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1530 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001531}
1532EXPORT_SYMBOL(__netif_schedule);
1533
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001534void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001535{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001536 if (atomic_dec_and_test(&skb->users)) {
1537 struct softnet_data *sd;
1538 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001539
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001540 local_irq_save(flags);
1541 sd = &__get_cpu_var(softnet_data);
1542 skb->next = sd->completion_queue;
1543 sd->completion_queue = skb;
1544 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1545 local_irq_restore(flags);
1546 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001547}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001548EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001549
1550void dev_kfree_skb_any(struct sk_buff *skb)
1551{
1552 if (in_irq() || irqs_disabled())
1553 dev_kfree_skb_irq(skb);
1554 else
1555 dev_kfree_skb(skb);
1556}
1557EXPORT_SYMBOL(dev_kfree_skb_any);
1558
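/*
 * Example (illustrative sketch): where the two helpers above fit in a
 * driver's TX-completion handling.  The ring structure is hypothetical.
 */
struct example_tx_ring {
	struct sk_buff *done[16];	/* skbs whose transmission completed */
	unsigned int count;
};

/* Called from the device's hard interrupt handler. */
static void example_clean_tx_ring(struct example_tx_ring *ring)
{
	unsigned int i;

	for (i = 0; i < ring->count; i++)
		dev_kfree_skb_irq(ring->done[i]);	/* IRQ-safe free */
	ring->count = 0;
}

/* Called from paths that may run in either IRQ or process context. */
static void example_drop_skb(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}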
1559
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001560/**
1561 * netif_device_detach - mark device as removed
1562 * @dev: network device
1563 *
1564 * Mark device as removed from system and therefore no longer available.
1565 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001566void netif_device_detach(struct net_device *dev)
1567{
1568 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1569 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001570 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001571 }
1572}
1573EXPORT_SYMBOL(netif_device_detach);
1574
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001575/**
1576 * netif_device_attach - mark device as attached
1577 * @dev: network device
1578 *
	1579 * Mark device as attached to the system and restart if needed.
1580 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001581void netif_device_attach(struct net_device *dev)
1582{
1583 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1584 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001585 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001586 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001587 }
1588}
1589EXPORT_SYMBOL(netif_device_attach);
1590
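/*
 * Example (illustrative sketch): the usual suspend/resume pairing of the
 * two helpers above in a driver's power-management callbacks.  The
 * hardware handling is left as hypothetical comments.
 */
static void example_suspend_netdev(struct net_device *dev)
{
	netif_device_detach(dev);	/* queues stopped if dev was running */
	/* ... hypothetical: put the hardware into a low-power state ... */
}

static void example_resume_netdev(struct net_device *dev)
{
	/* ... hypothetical: bring the hardware back up ... */
	netif_device_attach(dev);	/* queues and watchdog restarted */
}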
Ben Hutchings6de329e2008-06-16 17:02:28 -07001591static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1592{
1593 return ((features & NETIF_F_GEN_CSUM) ||
1594 ((features & NETIF_F_IP_CSUM) &&
1595 protocol == htons(ETH_P_IP)) ||
1596 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001597 protocol == htons(ETH_P_IPV6)) ||
1598 ((features & NETIF_F_FCOE_CRC) &&
1599 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001600}
1601
1602static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1603{
1604 if (can_checksum_protocol(dev->features, skb->protocol))
1605 return true;
1606
1607 if (skb->protocol == htons(ETH_P_8021Q)) {
1608 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1609 if (can_checksum_protocol(dev->features & dev->vlan_features,
1610 veh->h_vlan_encapsulated_proto))
1611 return true;
1612 }
1613
1614 return false;
1615}
Denis Vlasenko56079432006-03-29 15:57:29 -08001616
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617/*
1618 * Invalidate hardware checksum when packet is to be mangled, and
1619 * complete checksum manually on outgoing path.
1620 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001621int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622{
Al Virod3bc23e2006-11-14 21:24:49 -08001623 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001624 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
Patrick McHardy84fa7932006-08-29 16:44:56 -07001626 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001627 goto out_set_summed;
1628
1629 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001630 /* Let GSO fix up the checksum. */
1631 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 }
1633
Herbert Xua0308472007-10-15 01:47:15 -07001634 offset = skb->csum_start - skb_headroom(skb);
1635 BUG_ON(offset >= skb_headlen(skb));
1636 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1637
1638 offset += skb->csum_offset;
1639 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1640
1641 if (skb_cloned(skb) &&
1642 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1644 if (ret)
1645 goto out;
1646 }
1647
Herbert Xua0308472007-10-15 01:47:15 -07001648 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001649out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001651out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 return ret;
1653}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001654EXPORT_SYMBOL(skb_checksum_help);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655
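/*
 * Example (illustrative sketch): a driver TX path that cannot offload a
 * particular frame's checksum can fall back to skb_checksum_help(), the
 * same way dev_queue_xmit() does further below.  The name is hypothetical.
 */
static int example_tx_fixup_csum(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		return skb_checksum_help(skb);	/* 0 on success */
	return 0;
}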
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001656/**
1657 * skb_gso_segment - Perform segmentation on skb.
1658 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001659 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001660 *
1661 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001662 *
1663 * It may return NULL if the skb requires no segmentation. This is
1664 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001665 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001666struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001667{
1668 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1669 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001670 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001671 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001672
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001673 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001674 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001675 __skb_pull(skb, skb->mac_len);
1676
Herbert Xu67fd1a72009-01-19 16:26:44 -08001677 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1678 struct net_device *dev = skb->dev;
1679 struct ethtool_drvinfo info = {};
1680
1681 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1682 dev->ethtool_ops->get_drvinfo(dev, &info);
1683
1684 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1685 "ip_summed=%d",
1686 info.driver, dev ? dev->features : 0L,
1687 skb->sk ? skb->sk->sk_route_caps : 0L,
1688 skb->len, skb->data_len, skb->ip_summed);
1689
Herbert Xua430a432006-07-08 13:34:56 -07001690 if (skb_header_cloned(skb) &&
1691 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1692 return ERR_PTR(err);
1693 }
1694
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001695 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001696 list_for_each_entry_rcu(ptype,
1697 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001698 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001699 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001700 err = ptype->gso_send_check(skb);
1701 segs = ERR_PTR(err);
1702 if (err || skb_gso_ok(skb, features))
1703 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001704 __skb_push(skb, (skb->data -
1705 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001706 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001707 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001708 break;
1709 }
1710 }
1711 rcu_read_unlock();
1712
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001713 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001714
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001715 return segs;
1716}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001717EXPORT_SYMBOL(skb_gso_segment);
1718
Herbert Xufb286bb2005-11-10 13:01:24 -08001719/* Take action when hardware reception checksum errors are detected. */
1720#ifdef CONFIG_BUG
1721void netdev_rx_csum_fault(struct net_device *dev)
1722{
1723 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001724 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001725 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001726 dump_stack();
1727 }
1728}
1729EXPORT_SYMBOL(netdev_rx_csum_fault);
1730#endif
1731
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732/* Actually, we should eliminate this check as soon as we know that:
	1733 * 1. The IOMMU is present and allows mapping all the memory.
1734 * 2. No high memory really exists on this machine.
1735 */
1736
1737static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1738{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001739#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 int i;
1741
1742 if (dev->features & NETIF_F_HIGHDMA)
1743 return 0;
1744
1745 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1746 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1747 return 1;
1748
Herbert Xu3d3a8532006-06-27 13:33:10 -07001749#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 return 0;
1751}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001753struct dev_gso_cb {
1754 void (*destructor)(struct sk_buff *skb);
1755};
1756
1757#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1758
1759static void dev_gso_skb_destructor(struct sk_buff *skb)
1760{
1761 struct dev_gso_cb *cb;
1762
1763 do {
1764 struct sk_buff *nskb = skb->next;
1765
1766 skb->next = nskb->next;
1767 nskb->next = NULL;
1768 kfree_skb(nskb);
1769 } while (skb->next);
1770
1771 cb = DEV_GSO_CB(skb);
1772 if (cb->destructor)
1773 cb->destructor(skb);
1774}
1775
1776/**
1777 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1778 * @skb: buffer to segment
1779 *
1780 * This function segments the given skb and stores the list of segments
1781 * in skb->next.
1782 */
1783static int dev_gso_segment(struct sk_buff *skb)
1784{
1785 struct net_device *dev = skb->dev;
1786 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001787 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1788 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001789
Herbert Xu576a30e2006-06-27 13:22:38 -07001790 segs = skb_gso_segment(skb, features);
1791
1792 /* Verifying header integrity only. */
1793 if (!segs)
1794 return 0;
1795
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001796 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001797 return PTR_ERR(segs);
1798
1799 skb->next = segs;
1800 DEV_GSO_CB(skb)->destructor = skb->destructor;
1801 skb->destructor = dev_gso_skb_destructor;
1802
1803 return 0;
1804}
1805
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001806int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1807 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001808{
Stephen Hemminger00829822008-11-20 20:14:53 -08001809 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00001810 int rc = NETDEV_TX_OK;
Stephen Hemminger00829822008-11-20 20:14:53 -08001811
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001812 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001813 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001814 dev_queue_xmit_nit(skb, dev);
1815
Herbert Xu576a30e2006-06-27 13:22:38 -07001816 if (netif_needs_gso(dev, skb)) {
1817 if (unlikely(dev_gso_segment(skb)))
1818 goto out_kfree_skb;
1819 if (skb->next)
1820 goto gso;
1821 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001822
Eric Dumazet93f154b2009-05-18 22:19:19 -07001823 /*
	1824	 * If the device doesn't need skb->dst, release it right now while
	1825	 * it's hot in this cpu's cache
1826 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001827 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1828 skb_dst_drop(skb);
1829
Patrick Ohlyac45f602009-02-12 05:03:37 +00001830 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001831 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001832 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001833 /*
1834 * TODO: if skb_orphan() was called by
1835 * dev->hard_start_xmit() (for example, the unmodified
1836 * igb driver does that; bnx2 doesn't), then
1837 * skb_tx_software_timestamp() will be unable to send
1838 * back the time stamp.
1839 *
1840 * How can this be prevented? Always create another
1841 * reference to the socket before calling
1842 * dev->hard_start_xmit()? Prevent that skb_orphan()
1843 * does anything in dev->hard_start_xmit() by clearing
1844 * the skb destructor before the call and restoring it
1845 * afterwards, then doing the skb_orphan() ourselves?
1846 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001847 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001848 }
1849
Herbert Xu576a30e2006-06-27 13:22:38 -07001850gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001851 do {
1852 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001853
1854 skb->next = nskb->next;
1855 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00001856
1857 /*
	1858	 * If the device doesn't need nskb->dst, release it right now while
	1859	 * it's hot in this cpu's cache
1860 */
1861 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1862 skb_dst_drop(nskb);
1863
Stephen Hemminger00829822008-11-20 20:14:53 -08001864 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001865 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00001866 if (rc & ~NETDEV_TX_MASK)
1867 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07001868 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001869 skb->next = nskb;
1870 return rc;
1871 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001872 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001873 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001874 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001875 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001876
Patrick McHardy572a9d72009-11-10 06:14:14 +00001877out_kfree_gso_skb:
1878 if (likely(skb->next == NULL))
1879 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001880out_kfree_skb:
1881 kfree_skb(skb);
Patrick McHardy572a9d72009-11-10 06:14:14 +00001882 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001883}
1884
David S. Miller70192982009-01-27 16:34:47 -08001885static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001886
Stephen Hemminger92477442009-03-21 13:39:26 -07001887u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001888{
David S. Miller70192982009-01-27 16:34:47 -08001889 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001890
David S. Miller513de112009-05-03 14:43:10 -07001891 if (skb_rx_queue_recorded(skb)) {
1892 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001893 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001894 hash -= dev->real_num_tx_queues;
1895 return hash;
1896 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001897
1898 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001899 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001900 else
David S. Miller70192982009-01-27 16:34:47 -08001901 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001902
David S. Miller70192982009-01-27 16:34:47 -08001903 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001904
David S. Millerb6b2fed2008-07-21 09:48:06 -07001905 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001906}
Stephen Hemminger92477442009-03-21 13:39:26 -07001907EXPORT_SYMBOL(skb_tx_hash);
David S. Miller8f0f2222008-07-15 03:47:03 -07001908
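/*
 * Example (illustrative sketch): an ndo_select_queue() implementation
 * that pins a hypothetical management ethertype to queue 0 and lets
 * skb_tx_hash() spread everything else.  0x88B5 is only a placeholder
 * (a local experimental ethertype), not a protocol this file defines.
 */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	if (skb->protocol == htons(0x88B5))
		return 0;

	return skb_tx_hash(dev, skb);
}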
Eric Dumazeted046422009-11-13 21:54:04 +00001909static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1910{
1911 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1912 if (net_ratelimit()) {
1913 WARN(1, "%s selects TX queue %d, but "
1914 "real number of TX queues is %d\n",
1915 dev->name, queue_index,
1916 dev->real_num_tx_queues);
1917 }
1918 return 0;
1919 }
1920 return queue_index;
1921}
1922
David S. Millere8a04642008-07-17 00:34:19 -07001923static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1924 struct sk_buff *skb)
1925{
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001926 u16 queue_index;
1927 struct sock *sk = skb->sk;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001928
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001929 if (sk_tx_queue_recorded(sk)) {
1930 queue_index = sk_tx_queue_get(sk);
1931 } else {
1932 const struct net_device_ops *ops = dev->netdev_ops;
1933
1934 if (ops->ndo_select_queue) {
1935 queue_index = ops->ndo_select_queue(dev, skb);
Eric Dumazeted046422009-11-13 21:54:04 +00001936 queue_index = dev_cap_txqueue(dev, queue_index);
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001937 } else {
1938 queue_index = 0;
1939 if (dev->real_num_tx_queues > 1)
1940 queue_index = skb_tx_hash(dev, skb);
1941
1942 if (sk && sk->sk_dst_cache)
1943 sk_tx_queue_set(sk, queue_index);
1944 }
1945 }
David S. Millereae792b2008-07-15 03:03:33 -07001946
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001947 skb_set_queue_mapping(skb, queue_index);
1948 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001949}
1950
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001951static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1952 struct net_device *dev,
1953 struct netdev_queue *txq)
1954{
1955 spinlock_t *root_lock = qdisc_lock(q);
1956 int rc;
1957
1958 spin_lock(root_lock);
1959 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1960 kfree_skb(skb);
1961 rc = NET_XMIT_DROP;
1962 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1963 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1964 /*
1965 * This is a work-conserving queue; there are no old skbs
1966 * waiting to be sent out; and the qdisc is not running -
1967 * xmit the skb directly.
1968 */
1969 __qdisc_update_bstats(q, skb->len);
1970 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1971 __qdisc_run(q);
1972 else
1973 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1974
1975 rc = NET_XMIT_SUCCESS;
1976 } else {
1977 rc = qdisc_enqueue_root(skb, q);
1978 qdisc_run(q);
1979 }
1980 spin_unlock(root_lock);
1981
1982 return rc;
1983}
1984
Krishna Kumar4b258462010-01-21 01:26:29 -08001985/*
1986 * Returns true if either:
1987 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
1988 * 2. skb is fragmented and the device does not support SG, or if
	1989 *	at least one of the fragments is in highmem and the device does not
1990 * support DMA from it.
1991 */
1992static inline int skb_needs_linearize(struct sk_buff *skb,
1993 struct net_device *dev)
1994{
1995 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
1996 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
1997 illegal_highdma(dev, skb)));
1998}
1999
Dave Jonesd29f7492008-07-22 14:09:06 -07002000/**
2001 * dev_queue_xmit - transmit a buffer
2002 * @skb: buffer to transmit
2003 *
2004 * Queue a buffer for transmission to a network device. The caller must
2005 * have set the device and priority and built the buffer before calling
2006 * this function. The function can be called from an interrupt.
2007 *
2008 * A negative errno code is returned on a failure. A success does not
2009 * guarantee the frame will be transmitted as it may be dropped due
2010 * to congestion or traffic shaping.
2011 *
2012 * -----------------------------------------------------------------------------------
2013 * I notice this method can also return errors from the queue disciplines,
2014 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2015 * be positive.
2016 *
2017 * Regardless of the return value, the skb is consumed, so it is currently
2018 * difficult to retry a send to this method. (You can bump the ref count
2019 * before sending to hold a reference for retry if you are careful.)
2020 *
2021 * When calling this method, interrupts MUST be enabled. This is because
2022 * the BH enable code must have IRQs enabled so that it will not deadlock.
2023 * --BLG
2024 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025int dev_queue_xmit(struct sk_buff *skb)
2026{
2027 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002028 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 struct Qdisc *q;
2030 int rc = -ENOMEM;
2031
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002032 /* GSO will handle the following emulations directly. */
2033 if (netif_needs_gso(dev, skb))
2034 goto gso;
2035
Krishna Kumar4b258462010-01-21 01:26:29 -08002036 /* Convert a paged skb to linear, if required */
2037 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 goto out_kfree_skb;
2039
2040 /* If packet is not checksummed and device does not support
2041 * checksumming for this protocol, complete checksumming here.
2042 */
Herbert Xu663ead32007-04-09 11:59:07 -07002043 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2044 skb_set_transport_header(skb, skb->csum_start -
2045 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07002046 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
2047 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07002048 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002050gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002051 /* Disable soft irqs for various locks below. Also
2052 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002054 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055
David S. Millereae792b2008-07-15 03:03:33 -07002056 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07002057 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002058
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002060 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061#endif
2062 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002063 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002064 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 }
2066
2067 /* The device has no queue. Common case for software devices:
	2068	   loopback, all sorts of tunnels...
2069
Herbert Xu932ff272006-06-09 12:20:56 -07002070 Really, it is unlikely that netif_tx_lock protection is necessary
2071 here. (f.e. loopback and IP tunnels are clean ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 counters.)
	2073	   However, it is possible that they rely on the protection
	2074	   we provide here.
2075
	2076	   Check this and shoot the lock. It is not prone to deadlocks.
	2077	   Either shoot the noqueue qdisc, it is even simpler 8)
2078 */
2079 if (dev->flags & IFF_UP) {
2080 int cpu = smp_processor_id(); /* ok because BHs are off */
2081
David S. Millerc773e842008-07-08 23:13:53 -07002082 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083
David S. Millerc773e842008-07-08 23:13:53 -07002084 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002086 if (!netif_tx_queue_stopped(txq)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002087 rc = dev_hard_start_xmit(skb, dev, txq);
2088 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002089 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 goto out;
2091 }
2092 }
David S. Millerc773e842008-07-08 23:13:53 -07002093 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 if (net_ratelimit())
2095 printk(KERN_CRIT "Virtual device %s asks to "
2096 "queue packet!\n", dev->name);
2097 } else {
2098 /* Recursion is detected! It is possible,
2099 * unfortunately */
2100 if (net_ratelimit())
2101 printk(KERN_CRIT "Dead loop on virtual device "
2102 "%s, fix it urgently!\n", dev->name);
2103 }
2104 }
2105
2106 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002107 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
2109out_kfree_skb:
2110 kfree_skb(skb);
2111 return rc;
2112out:
Herbert Xud4828d82006-06-22 02:28:18 -07002113 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 return rc;
2115}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002116EXPORT_SYMBOL(dev_queue_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
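/*
 * Example (illustrative sketch): building a raw frame and handing it to
 * dev_queue_xmit().  The ethertype 0x88B5 is a placeholder and the frame
 * is broadcast; per the kernel-doc above the skb is consumed regardless
 * of the return value, and interrupts must be enabled.
 */
static int example_send_raw_frame(struct net_device *dev,
				  const void *payload, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), payload, len);

	if (dev_hard_header(skb, dev, 0x88B5, dev->broadcast,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	skb->dev = dev;
	skb->protocol = htons(0x88B5);

	return dev_queue_xmit(skb);
}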
2118
2119/*=======================================================================
2120 Receiver routines
2121 =======================================================================*/
2122
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002123int netdev_max_backlog __read_mostly = 1000;
2124int netdev_budget __read_mostly = 300;
2125int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126
2127DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2128
2129
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130/**
2131 * netif_rx - post buffer to the network code
2132 * @skb: buffer to post
2133 *
2134 * This function receives a packet from a device driver and queues it for
2135 * the upper (protocol) levels to process. It always succeeds. The buffer
2136 * may be dropped during processing for congestion control or by the
2137 * protocol layers.
2138 *
2139 * return values:
2140 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 * NET_RX_DROP (packet was dropped)
2142 *
2143 */
2144
2145int netif_rx(struct sk_buff *skb)
2146{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 struct softnet_data *queue;
2148 unsigned long flags;
2149
2150 /* if netpoll wants it, pretend we never saw it */
2151 if (netpoll_rx(skb))
2152 return NET_RX_DROP;
2153
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002154 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002155 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156
2157 /*
	2158	 * The code is rearranged so that the path is shortest when the
	2159	 * CPU is congested, but still operating.
2160 */
2161 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 queue = &__get_cpu_var(softnet_data);
2163
2164 __get_cpu_var(netdev_rx_stat).total++;
2165 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2166 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07002170 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 }
2172
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002173 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 goto enqueue;
2175 }
2176
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177 __get_cpu_var(netdev_rx_stat).dropped++;
2178 local_irq_restore(flags);
2179
2180 kfree_skb(skb);
2181 return NET_RX_DROP;
2182}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002183EXPORT_SYMBOL(netif_rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
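/*
 * Example (illustrative sketch): the classic non-NAPI receive path of an
 * Ethernet driver's interrupt handler, as assumed by the kernel-doc
 * above.  Copying out of a hypothetical DMA buffer stands in for the
 * real ring handling.
 */
static void example_rx_one_frame(struct net_device *dev,
				 const void *data, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);

	netif_rx(skb);			/* queue for the upper layers */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}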
2185int netif_rx_ni(struct sk_buff *skb)
2186{
2187 int err;
2188
2189 preempt_disable();
2190 err = netif_rx(skb);
2191 if (local_softirq_pending())
2192 do_softirq();
2193 preempt_enable();
2194
2195 return err;
2196}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197EXPORT_SYMBOL(netif_rx_ni);
2198
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199static void net_tx_action(struct softirq_action *h)
2200{
2201 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2202
2203 if (sd->completion_queue) {
2204 struct sk_buff *clist;
2205
2206 local_irq_disable();
2207 clist = sd->completion_queue;
2208 sd->completion_queue = NULL;
2209 local_irq_enable();
2210
2211 while (clist) {
2212 struct sk_buff *skb = clist;
2213 clist = clist->next;
2214
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002215 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 __kfree_skb(skb);
2217 }
2218 }
2219
2220 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002221 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222
2223 local_irq_disable();
2224 head = sd->output_queue;
2225 sd->output_queue = NULL;
2226 local_irq_enable();
2227
2228 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002229 struct Qdisc *q = head;
2230 spinlock_t *root_lock;
2231
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 head = head->next_sched;
2233
David S. Miller5fb66222008-08-02 20:02:43 -07002234 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002235 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002236 smp_mb__before_clear_bit();
2237 clear_bit(__QDISC_STATE_SCHED,
2238 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002239 qdisc_run(q);
2240 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002242 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002243 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002244 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002245 } else {
2246 smp_mb__before_clear_bit();
2247 clear_bit(__QDISC_STATE_SCHED,
2248 &q->state);
2249 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 }
2251 }
2252 }
2253}
2254
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002255static inline int deliver_skb(struct sk_buff *skb,
2256 struct packet_type *pt_prev,
2257 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258{
2259 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002260 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261}
2262
2263#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002264
2265#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2266/* This hook is defined here for ATM LANE */
2267int (*br_fdb_test_addr_hook)(struct net_device *dev,
2268 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002269EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002270#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
Stephen Hemminger6229e362007-03-21 13:38:47 -07002272/*
2273 * If bridge module is loaded call bridging hook.
2274 * returns NULL if packet was consumed.
2275 */
2276struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2277 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002278EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002279
Stephen Hemminger6229e362007-03-21 13:38:47 -07002280static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2281 struct packet_type **pt_prev, int *ret,
2282 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283{
2284 struct net_bridge_port *port;
2285
Stephen Hemminger6229e362007-03-21 13:38:47 -07002286 if (skb->pkt_type == PACKET_LOOPBACK ||
2287 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2288 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289
2290 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002291 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002293 }
2294
Stephen Hemminger6229e362007-03-21 13:38:47 -07002295 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296}
2297#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002298#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299#endif
2300
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002301#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2302struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2303EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2304
2305static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2306 struct packet_type **pt_prev,
2307 int *ret,
2308 struct net_device *orig_dev)
2309{
2310 if (skb->dev->macvlan_port == NULL)
2311 return skb;
2312
2313 if (*pt_prev) {
2314 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2315 *pt_prev = NULL;
2316 }
2317 return macvlan_handle_frame_hook(skb);
2318}
2319#else
2320#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2321#endif
2322
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323#ifdef CONFIG_NET_CLS_ACT
	2324/* TODO: Maybe we should just force sch_ingress to be compiled in
	2325 * when CONFIG_NET_CLS_ACT is? Otherwise we execute a few useless
	2326 * instructions (a compare and two extra stores) when it is not
	2327 * loaded but CONFIG_NET_CLS_ACT is set.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002328 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 * the ingress scheduler, you just can't add policies on ingress.
2330 *
2331 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002332static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002335 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002336 struct netdev_queue *rxq;
2337 int result = TC_ACT_OK;
2338 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002339
Herbert Xuf697c3e2007-10-14 00:38:47 -07002340 if (MAX_RED_LOOP < ttl++) {
2341 printk(KERN_WARNING
2342 "Redir loop detected Dropping packet (%d->%d)\n",
Eric Dumazet8964be42009-11-20 15:35:04 -08002343 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07002344 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 }
2346
Herbert Xuf697c3e2007-10-14 00:38:47 -07002347 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2348 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2349
David S. Miller555353c2008-07-08 17:33:13 -07002350 rxq = &dev->rx_queue;
2351
David S. Miller83874002008-07-17 00:53:03 -07002352 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002353 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002354 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002355 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2356 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002357 spin_unlock(qdisc_lock(q));
2358 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002359
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 return result;
2361}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002362
2363static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2364 struct packet_type **pt_prev,
2365 int *ret, struct net_device *orig_dev)
2366{
David S. Miller8d50b532008-07-30 02:37:46 -07002367 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002368 goto out;
2369
2370 if (*pt_prev) {
2371 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2372 *pt_prev = NULL;
2373 } else {
2374 /* Huh? Why does turning on AF_PACKET affect this? */
2375 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2376 }
2377
2378 switch (ing_filter(skb)) {
2379 case TC_ACT_SHOT:
2380 case TC_ACT_STOLEN:
2381 kfree_skb(skb);
2382 return NULL;
2383 }
2384
2385out:
2386 skb->tc_verd = 0;
2387 return skb;
2388}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389#endif
2390
Patrick McHardybc1d0412008-07-14 22:49:30 -07002391/*
2392 * netif_nit_deliver - deliver received packets to network taps
2393 * @skb: buffer
2394 *
2395 * This function is used to deliver incoming packets to network
2396 * taps. It should be used when the normal netif_receive_skb path
2397 * is bypassed, for example because of VLAN acceleration.
2398 */
2399void netif_nit_deliver(struct sk_buff *skb)
2400{
2401 struct packet_type *ptype;
2402
2403 if (list_empty(&ptype_all))
2404 return;
2405
2406 skb_reset_network_header(skb);
2407 skb_reset_transport_header(skb);
2408 skb->mac_len = skb->network_header - skb->mac_header;
2409
2410 rcu_read_lock();
2411 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2412 if (!ptype->dev || ptype->dev == skb->dev)
2413 deliver_skb(skb, ptype, skb->dev);
2414 }
2415 rcu_read_unlock();
2416}
2417
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002418/**
2419 * netif_receive_skb - process receive buffer from network
2420 * @skb: buffer to process
2421 *
2422 * netif_receive_skb() is the main receive data processing function.
2423 * It always succeeds. The buffer may be dropped during processing
2424 * for congestion control or by the protocol layers.
2425 *
2426 * This function may only be called from softirq context and interrupts
2427 * should be enabled.
2428 *
2429 * Return values (usually ignored):
2430 * NET_RX_SUCCESS: no congestion
2431 * NET_RX_DROP: packet was dropped
2432 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433int netif_receive_skb(struct sk_buff *skb)
2434{
2435 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002436 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002437 struct net_device *null_or_orig;
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002438 struct net_device *null_or_bond;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002440 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002442 if (!skb->tstamp.tv64)
2443 net_timestamp(skb);
2444
Eric Dumazet05423b22009-10-26 18:40:35 -07002445 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002446 return NET_RX_SUCCESS;
2447
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002449 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 return NET_RX_DROP;
2451
Eric Dumazet8964be42009-11-20 15:35:04 -08002452 if (!skb->skb_iif)
2453 skb->skb_iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002454
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002455 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002456 orig_dev = skb->dev;
2457 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002458 if (skb_bond_should_drop(skb))
2459 null_or_orig = orig_dev; /* deliver only exact match */
2460 else
2461 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002462 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002463
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464 __get_cpu_var(netdev_rx_stat).total++;
2465
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002466 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002467 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002468 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469
2470 pt_prev = NULL;
2471
2472 rcu_read_lock();
2473
2474#ifdef CONFIG_NET_CLS_ACT
2475 if (skb->tc_verd & TC_NCLS) {
2476 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2477 goto ncls;
2478 }
2479#endif
2480
2481 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002482 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2483 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002484 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002485 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 pt_prev = ptype;
2487 }
2488 }
2489
2490#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002491 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2492 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494ncls:
2495#endif
2496
Stephen Hemminger6229e362007-03-21 13:38:47 -07002497 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2498 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002500 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2501 if (!skb)
2502 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002504 /*
2505 * Make sure frames received on VLAN interfaces stacked on
2506 * bonding interfaces still make their way to any base bonding
2507 * device that may have registered for a specific ptype. The
2508 * handler may have to adjust skb->dev and orig_dev.
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002509 */
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002510 null_or_bond = NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002511 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2512 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002513 null_or_bond = vlan_dev_real_dev(skb->dev);
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002514 }
2515
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002517 list_for_each_entry_rcu(ptype,
2518 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002519 if (ptype->type == type && (ptype->dev == null_or_orig ||
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002520 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2521 ptype->dev == null_or_bond)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002522 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002523 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524 pt_prev = ptype;
2525 }
2526 }
2527
2528 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002529 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 } else {
2531 kfree_skb(skb);
	2532		/* Jamal, now you will not be able to escape explaining
	2533		 * to me how you were going to use this. :-)
2534 */
2535 ret = NET_RX_DROP;
2536 }
2537
2538out:
2539 rcu_read_unlock();
2540 return ret;
2541}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002542EXPORT_SYMBOL(netif_receive_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
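/*
 * Example (illustrative sketch): a minimal NAPI poll routine feeding
 * frames to netif_receive_skb() from softirq context, as the kernel-doc
 * above requires.  The per-device structure and its queue are
 * hypothetical stand-ins for a real RX ring; skb->dev is assumed to have
 * been set when the buffer was allocated with netdev_alloc_skb().
 */
struct example_napi_priv {
	struct napi_struct napi;
	struct sk_buff_head rxq;	/* frames pulled off the hardware */
};

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_napi_priv *priv =
		container_of(napi, struct example_napi_priv, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = skb_dequeue(&priv->rxq);

		if (!skb)
			break;

		skb->protocol = eth_type_trans(skb, skb->dev);
		netif_receive_skb(skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete(napi);	/* then re-enable device interrupts */

	return work_done;
}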
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002544/* Network device is going away, flush any packets still pending */
2545static void flush_backlog(void *arg)
2546{
2547 struct net_device *dev = arg;
2548 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2549 struct sk_buff *skb, *tmp;
2550
2551 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2552 if (skb->dev == dev) {
2553 __skb_unlink(skb, &queue->input_pkt_queue);
2554 kfree_skb(skb);
2555 }
2556}
2557
Herbert Xud565b0a2008-12-15 23:38:52 -08002558static int napi_gro_complete(struct sk_buff *skb)
2559{
2560 struct packet_type *ptype;
2561 __be16 type = skb->protocol;
2562 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2563 int err = -ENOENT;
2564
Herbert Xufc59f9a2009-04-14 15:11:06 -07002565 if (NAPI_GRO_CB(skb)->count == 1) {
2566 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002567 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002568 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002569
2570 rcu_read_lock();
2571 list_for_each_entry_rcu(ptype, head, list) {
2572 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2573 continue;
2574
2575 err = ptype->gro_complete(skb);
2576 break;
2577 }
2578 rcu_read_unlock();
2579
2580 if (err) {
2581 WARN_ON(&ptype->list == head);
2582 kfree_skb(skb);
2583 return NET_RX_SUCCESS;
2584 }
2585
2586out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002587 return netif_receive_skb(skb);
2588}
2589
David S. Miller11380a42010-01-19 13:46:10 -08002590static void napi_gro_flush(struct napi_struct *napi)
Herbert Xud565b0a2008-12-15 23:38:52 -08002591{
2592 struct sk_buff *skb, *next;
2593
2594 for (skb = napi->gro_list; skb; skb = next) {
2595 next = skb->next;
2596 skb->next = NULL;
2597 napi_gro_complete(skb);
2598 }
2599
Herbert Xu4ae55442009-02-08 18:00:36 +00002600 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002601 napi->gro_list = NULL;
2602}
Herbert Xud565b0a2008-12-15 23:38:52 -08002603
Ben Hutchings5b252f02009-10-29 07:17:09 +00002604enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002605{
2606 struct sk_buff **pp = NULL;
2607 struct packet_type *ptype;
2608 __be16 type = skb->protocol;
2609 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002610 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002611 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002612 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002613
2614 if (!(skb->dev->features & NETIF_F_GRO))
2615 goto normal;
2616
David S. Miller4cf704f2009-06-09 00:18:51 -07002617 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002618 goto normal;
2619
Herbert Xud565b0a2008-12-15 23:38:52 -08002620 rcu_read_lock();
2621 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002622 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2623 continue;
2624
Herbert Xu86911732009-01-29 14:19:50 +00002625 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002626 mac_len = skb->network_header - skb->mac_header;
2627 skb->mac_len = mac_len;
2628 NAPI_GRO_CB(skb)->same_flow = 0;
2629 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002630 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002631
Herbert Xud565b0a2008-12-15 23:38:52 -08002632 pp = ptype->gro_receive(&napi->gro_list, skb);
2633 break;
2634 }
2635 rcu_read_unlock();
2636
2637 if (&ptype->list == head)
2638 goto normal;
2639
Herbert Xu0da2afd52008-12-26 14:57:42 -08002640 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002641 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002642
Herbert Xud565b0a2008-12-15 23:38:52 -08002643 if (pp) {
2644 struct sk_buff *nskb = *pp;
2645
2646 *pp = nskb->next;
2647 nskb->next = NULL;
2648 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002649 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002650 }
2651
Herbert Xu0da2afd52008-12-26 14:57:42 -08002652 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002653 goto ok;
2654
Herbert Xu4ae55442009-02-08 18:00:36 +00002655 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002656 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002657
Herbert Xu4ae55442009-02-08 18:00:36 +00002658 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002659 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002660 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002661 skb->next = napi->gro_list;
2662 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002663 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002664
Herbert Xuad0f9902009-02-01 01:24:55 -08002665pull:
Herbert Xucb189782009-05-26 18:50:31 +00002666 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2667 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2668
2669 BUG_ON(skb->end - skb->tail < grow);
2670
2671 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2672
2673 skb->tail += grow;
2674 skb->data_len -= grow;
2675
2676 skb_shinfo(skb)->frags[0].page_offset += grow;
2677 skb_shinfo(skb)->frags[0].size -= grow;
2678
2679 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2680 put_page(skb_shinfo(skb)->frags[0].page);
2681 memmove(skb_shinfo(skb)->frags,
2682 skb_shinfo(skb)->frags + 1,
 2683 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2684 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002685 }
2686
Herbert Xud565b0a2008-12-15 23:38:52 -08002687ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002688 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002689
2690normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002691 ret = GRO_NORMAL;
2692 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002693}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002694EXPORT_SYMBOL(dev_gro_receive);
2695
Ben Hutchings5b252f02009-10-29 07:17:09 +00002696static gro_result_t
2697__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002698{
2699 struct sk_buff *p;
2700
Herbert Xud1c76af2009-03-16 10:50:02 -07002701 if (netpoll_rx_on(skb))
2702 return GRO_NORMAL;
2703
Herbert Xu96e93ea2009-01-06 10:49:34 -08002704 for (p = napi->gro_list; p; p = p->next) {
Joe Perchesf64f9e72009-11-29 16:55:45 -08002705 NAPI_GRO_CB(p)->same_flow =
2706 (p->dev == skb->dev) &&
2707 !compare_ether_header(skb_mac_header(p),
2708 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002709 NAPI_GRO_CB(p)->flush = 0;
2710 }
2711
2712 return dev_gro_receive(napi, skb);
2713}
Herbert Xu5d38a072009-01-04 16:13:40 -08002714
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002715gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002716{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002717 switch (ret) {
2718 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002719 if (netif_receive_skb(skb))
2720 ret = GRO_DROP;
2721 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002722
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002723 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002724 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002725 kfree_skb(skb);
2726 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002727
2728 case GRO_HELD:
2729 case GRO_MERGED:
2730 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002731 }
2732
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002733 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002734}
2735EXPORT_SYMBOL(napi_skb_finish);
2736
Herbert Xu78a478d2009-05-26 18:50:21 +00002737void skb_gro_reset_offset(struct sk_buff *skb)
2738{
2739 NAPI_GRO_CB(skb)->data_offset = 0;
2740 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002741 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002742
Herbert Xu78d3fd02009-05-26 18:50:23 +00002743 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002744 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002745 NAPI_GRO_CB(skb)->frag0 =
2746 page_address(skb_shinfo(skb)->frags[0].page) +
2747 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002748 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2749 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002750}
2751EXPORT_SYMBOL(skb_gro_reset_offset);
2752
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002753gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002754{
Herbert Xu86911732009-01-29 14:19:50 +00002755 skb_gro_reset_offset(skb);
2756
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002757 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002758}
2759EXPORT_SYMBOL(napi_gro_receive);
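/*
 * Example (sketch): a hypothetical driver's NAPI poll routine handing
 * completed frames to GRO instead of calling netif_receive_skb()
 * directly; foo_poll() and foo_rx_next_skb() are assumed driver-local
 * helpers, not defined here.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = foo_rx_next_skb(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */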
2760
Herbert Xu96e93ea2009-01-06 10:49:34 -08002761void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2762{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002763 __skb_pull(skb, skb_headlen(skb));
2764 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2765
2766 napi->skb = skb;
2767}
2768EXPORT_SYMBOL(napi_reuse_skb);
2769
Herbert Xu76620aa2009-04-16 02:02:07 -07002770struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002771{
Herbert Xu5d38a072009-01-04 16:13:40 -08002772 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002773
2774 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00002775 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2776 if (skb)
2777 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002778 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08002779 return skb;
2780}
Herbert Xu76620aa2009-04-16 02:02:07 -07002781EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002782
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002783gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2784 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002785{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002786 switch (ret) {
2787 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002788 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002789 skb->protocol = eth_type_trans(skb, napi->dev);
2790
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002791 if (ret == GRO_HELD)
2792 skb_gro_pull(skb, -ETH_HLEN);
2793 else if (netif_receive_skb(skb))
2794 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00002795 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002796
2797 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002798 case GRO_MERGED_FREE:
2799 napi_reuse_skb(napi, skb);
2800 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002801
2802 case GRO_MERGED:
2803 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002804 }
2805
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002806 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002807}
2808EXPORT_SYMBOL(napi_frags_finish);
2809
Herbert Xu76620aa2009-04-16 02:02:07 -07002810struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002811{
Herbert Xu76620aa2009-04-16 02:02:07 -07002812 struct sk_buff *skb = napi->skb;
2813 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002814 unsigned int hlen;
2815 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002816
2817 napi->skb = NULL;
2818
2819 skb_reset_mac_header(skb);
2820 skb_gro_reset_offset(skb);
2821
Herbert Xua5b1cf22009-05-26 18:50:28 +00002822 off = skb_gro_offset(skb);
2823 hlen = off + sizeof(*eth);
2824 eth = skb_gro_header_fast(skb, off);
2825 if (skb_gro_header_hard(skb, hlen)) {
2826 eth = skb_gro_header_slow(skb, hlen, off);
2827 if (unlikely(!eth)) {
2828 napi_reuse_skb(napi, skb);
2829 skb = NULL;
2830 goto out;
2831 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002832 }
2833
2834 skb_gro_pull(skb, sizeof(*eth));
2835
2836 /*
2837 * This works because the only protocols we care about don't require
2838 * special handling. We'll fix it up properly at the end.
2839 */
2840 skb->protocol = eth->h_proto;
2841
2842out:
2843 return skb;
2844}
2845EXPORT_SYMBOL(napi_frags_skb);
2846
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002847gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07002848{
2849 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002850
2851 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002852 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002853
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002854 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002855}
2856EXPORT_SYMBOL(napi_gro_frags);
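/*
 * Example (sketch): a header-splitting driver can avoid copying by
 * attaching its receive page directly to the skb obtained from
 * napi_get_frags() and then calling napi_gro_frags(); "page" and
 * "frame_len" are assumed to come from the driver's RX ring.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;				// allocation failed, drop
 *	skb_fill_page_desc(skb, 0, page, 0, frame_len);
 *	skb->len += frame_len;
 *	skb->data_len += frame_len;
 *	skb->truesize += frame_len;
 *	napi_gro_frags(napi);
 */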
2857
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002858static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859{
2860 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2862 unsigned long start_time = jiffies;
2863
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002864 napi->weight = weight_p;
2865 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867
2868 local_irq_disable();
2869 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002870 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002871 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002872 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002873 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002874 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875 local_irq_enable();
2876
Herbert Xu8f1ead22009-03-26 00:59:10 -07002877 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002878 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002880 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881}
2882
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002883/**
2884 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002885 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002886 *
2887 * The entry's receive function will be scheduled to run
2888 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002889void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002890{
2891 unsigned long flags;
2892
2893 local_irq_save(flags);
2894 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2895 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2896 local_irq_restore(flags);
2897}
2898EXPORT_SYMBOL(__napi_schedule);
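/*
 * Example (sketch): the usual caller is a device interrupt handler that
 * masks its RX interrupt and defers the work; napi_schedule() performs
 * the NAPI_STATE_SCHED test before invoking __napi_schedule().
 * foo_interrupt() and foo_disable_rx_irq() are assumed names.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_adapter *adapter = dev_id;
 *
 *		foo_disable_rx_irq(adapter);
 *		napi_schedule(&adapter->napi);
 *		return IRQ_HANDLED;
 *	}
 */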
2899
Herbert Xud565b0a2008-12-15 23:38:52 -08002900void __napi_complete(struct napi_struct *n)
2901{
2902 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2903 BUG_ON(n->gro_list);
2904
2905 list_del(&n->poll_list);
2906 smp_mb__before_clear_bit();
2907 clear_bit(NAPI_STATE_SCHED, &n->state);
2908}
2909EXPORT_SYMBOL(__napi_complete);
2910
2911void napi_complete(struct napi_struct *n)
2912{
2913 unsigned long flags;
2914
2915 /*
2916 * don't let napi dequeue from the cpu poll list
 2917 * just in case it's running on a different cpu
2918 */
2919 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2920 return;
2921
2922 napi_gro_flush(n);
2923 local_irq_save(flags);
2924 __napi_complete(n);
2925 local_irq_restore(flags);
2926}
2927EXPORT_SYMBOL(napi_complete);
2928
2929void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2930 int (*poll)(struct napi_struct *, int), int weight)
2931{
2932 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002933 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002934 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002935 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002936 napi->poll = poll;
2937 napi->weight = weight;
2938 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002939 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002940#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002941 spin_lock_init(&napi->poll_lock);
2942 napi->poll_owner = -1;
2943#endif
2944 set_bit(NAPI_STATE_SCHED, &napi->state);
2945}
2946EXPORT_SYMBOL(netif_napi_add);
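/*
 * Example (sketch): a driver registers its poll routine once at probe
 * time and removes it on teardown; the weight of 64 is the value most
 * Ethernet drivers pass here.
 *
 *	netif_napi_add(netdev, &adapter->napi, foo_poll, 64);
 *	...
 *	netif_napi_del(&adapter->napi);
 */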
2947
2948void netif_napi_del(struct napi_struct *napi)
2949{
2950 struct sk_buff *skb, *next;
2951
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002952 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002953 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002954
2955 for (skb = napi->gro_list; skb; skb = next) {
2956 next = skb->next;
2957 skb->next = NULL;
2958 kfree_skb(skb);
2959 }
2960
2961 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002962 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002963}
2964EXPORT_SYMBOL(netif_napi_del);
2965
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002966
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967static void net_rx_action(struct softirq_action *h)
2968{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002969 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002970 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002971 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002972 void *have;
2973
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 local_irq_disable();
2975
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002976 while (!list_empty(list)) {
2977 struct napi_struct *n;
2978 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002980 /* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002981 * Allow this to run for 2 jiffies, which allows
 2982 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002983 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002984 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 goto softnet_break;
2986
2987 local_irq_enable();
2988
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002989 /* Even though interrupts have been re-enabled, this
2990 * access is safe because interrupts can only add new
2991 * entries to the tail of this list, and only ->poll()
2992 * calls can remove this head entry from the list.
2993 */
2994 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002996 have = netpoll_poll_lock(n);
2997
2998 weight = n->weight;
2999
David S. Miller0a7606c2007-10-29 21:28:47 -07003000 /* This NAPI_STATE_SCHED test is for avoiding a race
3001 * with netpoll's poll_napi(). Only the entity which
3002 * obtains the lock and sees NAPI_STATE_SCHED set will
3003 * actually make the ->poll() call. Therefore we avoid
 3004 * accidentally calling ->poll() when NAPI is not scheduled.
3005 */
3006 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00003007 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07003008 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00003009 trace_napi_poll(n);
3010 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003011
3012 WARN_ON_ONCE(work > weight);
3013
3014 budget -= work;
3015
3016 local_irq_disable();
3017
3018 /* Drivers must not modify the NAPI state if they
3019 * consume the entire weight. In such cases this code
3020 * still "owns" the NAPI instance and therefore can
3021 * move the instance around on the list at-will.
3022 */
David S. Millerfed17f32008-01-07 21:00:40 -08003023 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07003024 if (unlikely(napi_disable_pending(n))) {
3025 local_irq_enable();
3026 napi_complete(n);
3027 local_irq_disable();
3028 } else
David S. Millerfed17f32008-01-07 21:00:40 -08003029 list_move_tail(&n->poll_list, list);
3030 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003031
3032 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033 }
3034out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07003035 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003036
Chris Leechdb217332006-06-17 21:24:58 -07003037#ifdef CONFIG_NET_DMA
3038 /*
3039 * There may not be any more sk_buffs coming right now, so push
3040 * any pending DMA copies to hardware
3041 */
Dan Williams2ba05622009-01-06 11:38:14 -07003042 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07003043#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003044
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 return;
3046
3047softnet_break:
3048 __get_cpu_var(netdev_rx_stat).time_squeeze++;
3049 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3050 goto out;
3051}
3052
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003053static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054
3055/**
3056 * register_gifconf - register a SIOCGIF handler
3057 * @family: Address family
3058 * @gifconf: Function handler
3059 *
3060 * Register protocol dependent address dumping routines. The handler
3061 * that is passed must not be freed or reused until it has been replaced
3062 * by another handler.
3063 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003064int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065{
3066 if (family >= NPROTO)
3067 return -EINVAL;
3068 gifconf_list[family] = gifconf;
3069 return 0;
3070}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003071EXPORT_SYMBOL(register_gifconf);
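/*
 * Example (sketch): an address family hooks itself into SIOCGIFCONF at
 * init time; foo_gifconf() is an assumed handler with the
 * gifconf_func_t signature, and the IPv4 code registers its own
 * handler for PF_INET in much the same way.
 *
 *	static int foo_gifconf(struct net_device *dev, char __user *buf,
 *			       int len)
 *	{
 *		// write one ifreq per address of dev into buf and return
 *		// the number of bytes used (or needed when buf is NULL)
 *	}
 *
 *	register_gifconf(PF_INET, foo_gifconf);
 */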
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072
3073
3074/*
3075 * Map an interface index to its name (SIOCGIFNAME)
3076 */
3077
3078/*
3079 * We need this ioctl for efficient implementation of the
3080 * if_indextoname() function required by the IPv6 API. Without
3081 * it, we would have to search all the interfaces to find a
3082 * match. --pb
3083 */
3084
Eric W. Biederman881d9662007-09-17 11:56:21 -07003085static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086{
3087 struct net_device *dev;
3088 struct ifreq ifr;
3089
3090 /*
3091 * Fetch the caller's info block.
3092 */
3093
3094 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3095 return -EFAULT;
3096
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003097 rcu_read_lock();
3098 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003100 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101 return -ENODEV;
3102 }
3103
3104 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003105 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106
3107 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3108 return -EFAULT;
3109 return 0;
3110}
3111
3112/*
3113 * Perform a SIOCGIFCONF call. This structure will change
3114 * size eventually, and there is nothing I can do about it.
3115 * Thus we will need a 'compatibility mode'.
3116 */
3117
Eric W. Biederman881d9662007-09-17 11:56:21 -07003118static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119{
3120 struct ifconf ifc;
3121 struct net_device *dev;
3122 char __user *pos;
3123 int len;
3124 int total;
3125 int i;
3126
3127 /*
3128 * Fetch the caller's info block.
3129 */
3130
3131 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3132 return -EFAULT;
3133
3134 pos = ifc.ifc_buf;
3135 len = ifc.ifc_len;
3136
3137 /*
3138 * Loop over the interfaces, and write an info block for each.
3139 */
3140
3141 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003142 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143 for (i = 0; i < NPROTO; i++) {
3144 if (gifconf_list[i]) {
3145 int done;
3146 if (!pos)
3147 done = gifconf_list[i](dev, NULL, 0);
3148 else
3149 done = gifconf_list[i](dev, pos + total,
3150 len - total);
3151 if (done < 0)
3152 return -EFAULT;
3153 total += done;
3154 }
3155 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003156 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157
3158 /*
3159 * All done. Write the updated control block back to the caller.
3160 */
3161 ifc.ifc_len = total;
3162
3163 /*
3164 * Both BSD and Solaris return 0 here, so we do too.
3165 */
3166 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3167}
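/*
 * Example (sketch): the matching user-space side sizes a buffer, issues
 * SIOCGIFCONF on any socket and walks the returned ifreq array; error
 * handling is abbreviated.
 *
 *	struct ifreq reqs[32];
 *	struct ifconf ifc;
 *	int i, n, fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	ifc.ifc_len = sizeof(reqs);
 *	ifc.ifc_req = reqs;
 *	if (fd >= 0 && ioctl(fd, SIOCGIFCONF, &ifc) == 0) {
 *		n = ifc.ifc_len / sizeof(struct ifreq);
 *		for (i = 0; i < n; i++)
 *			printf("%s\n", reqs[i].ifr_name);
 *	}
 */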
3168
3169#ifdef CONFIG_PROC_FS
3170/*
3171 * This is invoked by the /proc filesystem handler to display a device
3172 * in detail.
3173 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003175 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176{
Denis V. Luneve372c412007-11-19 22:31:54 -08003177 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003178 loff_t off;
3179 struct net_device *dev;
3180
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003181 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07003182 if (!*pos)
3183 return SEQ_START_TOKEN;
3184
3185 off = 1;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003186 for_each_netdev_rcu(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003187 if (off++ == *pos)
3188 return dev;
3189
3190 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191}
3192
3193void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3194{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003195 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3196 first_net_device(seq_file_net(seq)) :
3197 next_net_device((struct net_device *)v);
3198
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199 ++*pos;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003200 return rcu_dereference(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201}
3202
3203void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003204 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003206 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207}
3208
3209static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3210{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003211 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212
Jesper Dangaard Brouer2d13baf2010-01-05 05:50:52 +00003213 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
Rusty Russell5a1b5892007-04-28 21:04:03 -07003214 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3215 dev->name, stats->rx_bytes, stats->rx_packets,
3216 stats->rx_errors,
3217 stats->rx_dropped + stats->rx_missed_errors,
3218 stats->rx_fifo_errors,
3219 stats->rx_length_errors + stats->rx_over_errors +
3220 stats->rx_crc_errors + stats->rx_frame_errors,
3221 stats->rx_compressed, stats->multicast,
3222 stats->tx_bytes, stats->tx_packets,
3223 stats->tx_errors, stats->tx_dropped,
3224 stats->tx_fifo_errors, stats->collisions,
3225 stats->tx_carrier_errors +
3226 stats->tx_aborted_errors +
3227 stats->tx_window_errors +
3228 stats->tx_heartbeat_errors,
3229 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230}
3231
3232/*
3233 * Called from the PROCfs module. This now uses the new arbitrary sized
3234 * /proc/net interface to create /proc/net/dev
3235 */
3236static int dev_seq_show(struct seq_file *seq, void *v)
3237{
3238 if (v == SEQ_START_TOKEN)
3239 seq_puts(seq, "Inter-| Receive "
3240 " | Transmit\n"
3241 " face |bytes packets errs drop fifo frame "
3242 "compressed multicast|bytes packets errs "
3243 "drop fifo colls carrier compressed\n");
3244 else
3245 dev_seq_printf_stats(seq, v);
3246 return 0;
3247}
3248
3249static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3250{
3251 struct netif_rx_stats *rc = NULL;
3252
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003253 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003254 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255 rc = &per_cpu(netdev_rx_stat, *pos);
3256 break;
3257 } else
3258 ++*pos;
3259 return rc;
3260}
3261
3262static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3263{
3264 return softnet_get_online(pos);
3265}
3266
3267static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3268{
3269 ++*pos;
3270 return softnet_get_online(pos);
3271}
3272
3273static void softnet_seq_stop(struct seq_file *seq, void *v)
3274{
3275}
3276
3277static int softnet_seq_show(struct seq_file *seq, void *v)
3278{
3279 struct netif_rx_stats *s = v;
3280
3281 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003282 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003283 0, 0, 0, 0, /* was fastroute */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003284 s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285 return 0;
3286}
3287
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003288static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289 .start = dev_seq_start,
3290 .next = dev_seq_next,
3291 .stop = dev_seq_stop,
3292 .show = dev_seq_show,
3293};
3294
3295static int dev_seq_open(struct inode *inode, struct file *file)
3296{
Denis V. Luneve372c412007-11-19 22:31:54 -08003297 return seq_open_net(inode, file, &dev_seq_ops,
3298 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299}
3300
Arjan van de Ven9a321442007-02-12 00:55:35 -08003301static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003302 .owner = THIS_MODULE,
3303 .open = dev_seq_open,
3304 .read = seq_read,
3305 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003306 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307};
3308
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003309static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310 .start = softnet_seq_start,
3311 .next = softnet_seq_next,
3312 .stop = softnet_seq_stop,
3313 .show = softnet_seq_show,
3314};
3315
3316static int softnet_seq_open(struct inode *inode, struct file *file)
3317{
3318 return seq_open(file, &softnet_seq_ops);
3319}
3320
Arjan van de Ven9a321442007-02-12 00:55:35 -08003321static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003322 .owner = THIS_MODULE,
3323 .open = softnet_seq_open,
3324 .read = seq_read,
3325 .llseek = seq_lseek,
3326 .release = seq_release,
3327};
3328
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003329static void *ptype_get_idx(loff_t pos)
3330{
3331 struct packet_type *pt = NULL;
3332 loff_t i = 0;
3333 int t;
3334
3335 list_for_each_entry_rcu(pt, &ptype_all, list) {
3336 if (i == pos)
3337 return pt;
3338 ++i;
3339 }
3340
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003341 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003342 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3343 if (i == pos)
3344 return pt;
3345 ++i;
3346 }
3347 }
3348 return NULL;
3349}
3350
3351static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003352 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003353{
3354 rcu_read_lock();
3355 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3356}
3357
3358static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3359{
3360 struct packet_type *pt;
3361 struct list_head *nxt;
3362 int hash;
3363
3364 ++*pos;
3365 if (v == SEQ_START_TOKEN)
3366 return ptype_get_idx(0);
3367
3368 pt = v;
3369 nxt = pt->list.next;
3370 if (pt->type == htons(ETH_P_ALL)) {
3371 if (nxt != &ptype_all)
3372 goto found;
3373 hash = 0;
3374 nxt = ptype_base[0].next;
3375 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003376 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003377
3378 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003379 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003380 return NULL;
3381 nxt = ptype_base[hash].next;
3382 }
3383found:
3384 return list_entry(nxt, struct packet_type, list);
3385}
3386
3387static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003388 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003389{
3390 rcu_read_unlock();
3391}
3392
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003393static int ptype_seq_show(struct seq_file *seq, void *v)
3394{
3395 struct packet_type *pt = v;
3396
3397 if (v == SEQ_START_TOKEN)
3398 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003399 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003400 if (pt->type == htons(ETH_P_ALL))
3401 seq_puts(seq, "ALL ");
3402 else
3403 seq_printf(seq, "%04x", ntohs(pt->type));
3404
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003405 seq_printf(seq, " %-8s %pF\n",
3406 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003407 }
3408
3409 return 0;
3410}
3411
3412static const struct seq_operations ptype_seq_ops = {
3413 .start = ptype_seq_start,
3414 .next = ptype_seq_next,
3415 .stop = ptype_seq_stop,
3416 .show = ptype_seq_show,
3417};
3418
3419static int ptype_seq_open(struct inode *inode, struct file *file)
3420{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003421 return seq_open_net(inode, file, &ptype_seq_ops,
3422 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003423}
3424
3425static const struct file_operations ptype_seq_fops = {
3426 .owner = THIS_MODULE,
3427 .open = ptype_seq_open,
3428 .read = seq_read,
3429 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003430 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003431};
3432
3433
Pavel Emelyanov46650792007-10-08 20:38:39 -07003434static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435{
3436 int rc = -ENOMEM;
3437
Eric W. Biederman881d9662007-09-17 11:56:21 -07003438 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003440 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003442 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003443 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003444
Eric W. Biederman881d9662007-09-17 11:56:21 -07003445 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003446 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447 rc = 0;
3448out:
3449 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003450out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003451 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003453 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003455 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456 goto out;
3457}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003458
Pavel Emelyanov46650792007-10-08 20:38:39 -07003459static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003460{
3461 wext_proc_exit(net);
3462
3463 proc_net_remove(net, "ptype");
3464 proc_net_remove(net, "softnet_stat");
3465 proc_net_remove(net, "dev");
3466}
3467
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003468static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003469 .init = dev_proc_net_init,
3470 .exit = dev_proc_net_exit,
3471};
3472
3473static int __init dev_proc_init(void)
3474{
3475 return register_pernet_subsys(&dev_proc_ops);
3476}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477#else
3478#define dev_proc_init() 0
3479#endif /* CONFIG_PROC_FS */
3480
3481
3482/**
3483 * netdev_set_master - set up master/slave pair
3484 * @slave: slave device
3485 * @master: new master device
3486 *
3487 * Changes the master device of the slave. Pass %NULL to break the
3488 * bonding. The caller must hold the RTNL semaphore. On a failure
3489 * a negative errno code is returned. On success the reference counts
3490 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3491 * function returns zero.
3492 */
3493int netdev_set_master(struct net_device *slave, struct net_device *master)
3494{
3495 struct net_device *old = slave->master;
3496
3497 ASSERT_RTNL();
3498
3499 if (master) {
3500 if (old)
3501 return -EBUSY;
3502 dev_hold(master);
3503 }
3504
3505 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003506
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 synchronize_net();
3508
3509 if (old)
3510 dev_put(old);
3511
3512 if (master)
3513 slave->flags |= IFF_SLAVE;
3514 else
3515 slave->flags &= ~IFF_SLAVE;
3516
3517 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3518 return 0;
3519}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003520EXPORT_SYMBOL(netdev_set_master);
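/*
 * Example (sketch): roughly how a bonding-style driver attaches and
 * releases a slave, with error handling omitted; both calls are made
 * with the RTNL semaphore already held.
 *
 *	err = netdev_set_master(slave_dev, bond_dev);	// enslave
 *	...
 *	netdev_set_master(slave_dev, NULL);		// break the bond
 */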
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003522static void dev_change_rx_flags(struct net_device *dev, int flags)
3523{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003524 const struct net_device_ops *ops = dev->netdev_ops;
3525
3526 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3527 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003528}
3529
Wang Chendad9b332008-06-18 01:48:28 -07003530static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003531{
3532 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003533 uid_t uid;
3534 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003535
Patrick McHardy24023452007-07-14 18:51:31 -07003536 ASSERT_RTNL();
3537
Wang Chendad9b332008-06-18 01:48:28 -07003538 dev->flags |= IFF_PROMISC;
3539 dev->promiscuity += inc;
3540 if (dev->promiscuity == 0) {
3541 /*
3542 * Avoid overflow.
3543 * If inc causes overflow, untouch promisc and return error.
3544 */
3545 if (inc < 0)
3546 dev->flags &= ~IFF_PROMISC;
3547 else {
3548 dev->promiscuity -= inc;
3549 printk(KERN_WARNING "%s: promiscuity touches roof, "
3550 "set promiscuity failed, promiscuity feature "
3551 "of device might be broken.\n", dev->name);
3552 return -EOVERFLOW;
3553 }
3554 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003555 if (dev->flags != old_flags) {
3556 printk(KERN_INFO "device %s %s promiscuous mode\n",
3557 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3558 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003559 if (audit_enabled) {
3560 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003561 audit_log(current->audit_context, GFP_ATOMIC,
3562 AUDIT_ANOM_PROMISCUOUS,
3563 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3564 dev->name, (dev->flags & IFF_PROMISC),
3565 (old_flags & IFF_PROMISC),
3566 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003567 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003568 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003569 }
Patrick McHardy24023452007-07-14 18:51:31 -07003570
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003571 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003572 }
Wang Chendad9b332008-06-18 01:48:28 -07003573 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003574}
3575
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576/**
3577 * dev_set_promiscuity - update promiscuity count on a device
3578 * @dev: device
3579 * @inc: modifier
3580 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003581 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003582 * remains above zero the interface remains promiscuous. Once it hits zero
3583 * the device reverts back to normal filtering operation. A negative inc
3584 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003585 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003586 */
Wang Chendad9b332008-06-18 01:48:28 -07003587int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003588{
3589 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003590 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003591
Wang Chendad9b332008-06-18 01:48:28 -07003592 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003593 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003594 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003595 if (dev->flags != old_flags)
3596 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003597 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003599EXPORT_SYMBOL(dev_set_promiscuity);
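/*
 * Example (sketch): a packet-capture style user takes a promiscuity
 * reference for the duration of the capture and drops it afterwards,
 * holding RTNL around each call.
 *
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, 1);	// start seeing all frames
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);	// last user gone, leave promisc
 *	rtnl_unlock();
 */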
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600
3601/**
3602 * dev_set_allmulti - update allmulti count on a device
3603 * @dev: device
3604 * @inc: modifier
3605 *
3606 * Add or remove reception of all multicast frames to a device. While the
3607 * count in the device remains above zero the interface remains listening
 3608 * to all multicast frames. Once it hits zero the device reverts to normal
3609 * filtering operation. A negative @inc value is used to drop the counter
3610 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003611 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612 */
3613
Wang Chendad9b332008-06-18 01:48:28 -07003614int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615{
3616 unsigned short old_flags = dev->flags;
3617
Patrick McHardy24023452007-07-14 18:51:31 -07003618 ASSERT_RTNL();
3619
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003621 dev->allmulti += inc;
3622 if (dev->allmulti == 0) {
3623 /*
3624 * Avoid overflow.
3625 * If inc causes overflow, untouch allmulti and return error.
3626 */
3627 if (inc < 0)
3628 dev->flags &= ~IFF_ALLMULTI;
3629 else {
3630 dev->allmulti -= inc;
3631 printk(KERN_WARNING "%s: allmulti touches roof, "
3632 "set allmulti failed, allmulti feature of "
3633 "device might be broken.\n", dev->name);
3634 return -EOVERFLOW;
3635 }
3636 }
Patrick McHardy24023452007-07-14 18:51:31 -07003637 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003638 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003639 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003640 }
Wang Chendad9b332008-06-18 01:48:28 -07003641 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003642}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003643EXPORT_SYMBOL(dev_set_allmulti);
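/*
 * Example (sketch): a tunnel or routing component that must see every
 * multicast frame takes an allmulti reference in the same way, again
 * under RTNL.
 *
 *	dev_set_allmulti(dev, 1);	// receive all multicast frames
 *	...
 *	dev_set_allmulti(dev, -1);	// drop the reference
 */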
Patrick McHardy4417da62007-06-27 01:28:10 -07003644
3645/*
3646 * Upload unicast and multicast address lists to device and
3647 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003648 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003649 * are present.
3650 */
3651void __dev_set_rx_mode(struct net_device *dev)
3652{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003653 const struct net_device_ops *ops = dev->netdev_ops;
3654
Patrick McHardy4417da62007-06-27 01:28:10 -07003655 /* dev_open will call this function so the list will stay sane. */
3656 if (!(dev->flags&IFF_UP))
3657 return;
3658
3659 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003660 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003661
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003662 if (ops->ndo_set_rx_mode)
3663 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003664 else {
 3665 /* Unicast address changes may only happen under the rtnl,
3666 * therefore calling __dev_set_promiscuity here is safe.
3667 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003668 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003669 __dev_set_promiscuity(dev, 1);
3670 dev->uc_promisc = 1;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003671 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003672 __dev_set_promiscuity(dev, -1);
3673 dev->uc_promisc = 0;
3674 }
3675
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003676 if (ops->ndo_set_multicast_list)
3677 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003678 }
3679}
3680
3681void dev_set_rx_mode(struct net_device *dev)
3682{
David S. Millerb9e40852008-07-15 00:15:08 -07003683 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003684 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003685 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686}
3687
Jiri Pirkof001fde2009-05-05 02:48:28 +00003688/* hw addresses list handling functions */
3689
Jiri Pirko31278e72009-06-17 01:12:19 +00003690static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3691 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003692{
3693 struct netdev_hw_addr *ha;
3694 int alloc_size;
3695
3696 if (addr_len > MAX_ADDR_LEN)
3697 return -EINVAL;
3698
Jiri Pirko31278e72009-06-17 01:12:19 +00003699 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003700 if (!memcmp(ha->addr, addr, addr_len) &&
3701 ha->type == addr_type) {
3702 ha->refcount++;
3703 return 0;
3704 }
3705 }
3706
3707
Jiri Pirkof001fde2009-05-05 02:48:28 +00003708 alloc_size = sizeof(*ha);
3709 if (alloc_size < L1_CACHE_BYTES)
3710 alloc_size = L1_CACHE_BYTES;
3711 ha = kmalloc(alloc_size, GFP_ATOMIC);
3712 if (!ha)
3713 return -ENOMEM;
3714 memcpy(ha->addr, addr, addr_len);
3715 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003716 ha->refcount = 1;
3717 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003718 list_add_tail_rcu(&ha->list, &list->list);
3719 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003720 return 0;
3721}
3722
3723static void ha_rcu_free(struct rcu_head *head)
3724{
3725 struct netdev_hw_addr *ha;
3726
3727 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3728 kfree(ha);
3729}
3730
Jiri Pirko31278e72009-06-17 01:12:19 +00003731static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3732 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003733{
3734 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003735
Jiri Pirko31278e72009-06-17 01:12:19 +00003736 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003737 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003738 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003739 if (--ha->refcount)
3740 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003741 list_del_rcu(&ha->list);
3742 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003743 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003744 return 0;
3745 }
3746 }
3747 return -ENOENT;
3748}
3749
Jiri Pirko31278e72009-06-17 01:12:19 +00003750static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3751 struct netdev_hw_addr_list *from_list,
3752 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003753 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003754{
3755 int err;
3756 struct netdev_hw_addr *ha, *ha2;
3757 unsigned char type;
3758
Jiri Pirko31278e72009-06-17 01:12:19 +00003759 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003760 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003761 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003762 if (err)
3763 goto unroll;
3764 }
3765 return 0;
3766
3767unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00003768 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003769 if (ha2 == ha)
3770 break;
3771 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003772 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003773 }
3774 return err;
3775}
3776
Jiri Pirko31278e72009-06-17 01:12:19 +00003777static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3778 struct netdev_hw_addr_list *from_list,
3779 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003780 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003781{
3782 struct netdev_hw_addr *ha;
3783 unsigned char type;
3784
Jiri Pirko31278e72009-06-17 01:12:19 +00003785 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003786 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003787 __hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003788 }
3789}
3790
Jiri Pirko31278e72009-06-17 01:12:19 +00003791static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3792 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003793 int addr_len)
3794{
3795 int err = 0;
3796 struct netdev_hw_addr *ha, *tmp;
3797
Jiri Pirko31278e72009-06-17 01:12:19 +00003798 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003799 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003800 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003801 addr_len, ha->type);
3802 if (err)
3803 break;
3804 ha->synced = true;
3805 ha->refcount++;
3806 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003807 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3808 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003809 }
3810 }
3811 return err;
3812}
3813
Jiri Pirko31278e72009-06-17 01:12:19 +00003814static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3815 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003816 int addr_len)
3817{
3818 struct netdev_hw_addr *ha, *tmp;
3819
Jiri Pirko31278e72009-06-17 01:12:19 +00003820 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003821 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003822 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003823 addr_len, ha->type);
3824 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003825 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003826 addr_len, ha->type);
3827 }
3828 }
3829}
3830
Jiri Pirko31278e72009-06-17 01:12:19 +00003831static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003832{
3833 struct netdev_hw_addr *ha, *tmp;
3834
Jiri Pirko31278e72009-06-17 01:12:19 +00003835 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003836 list_del_rcu(&ha->list);
3837 call_rcu(&ha->rcu_head, ha_rcu_free);
3838 }
Jiri Pirko31278e72009-06-17 01:12:19 +00003839 list->count = 0;
3840}
3841
3842static void __hw_addr_init(struct netdev_hw_addr_list *list)
3843{
3844 INIT_LIST_HEAD(&list->list);
3845 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003846}
3847
3848/* Device addresses handling functions */
3849
3850static void dev_addr_flush(struct net_device *dev)
3851{
3852 /* rtnl_mutex must be held here */
3853
Jiri Pirko31278e72009-06-17 01:12:19 +00003854 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003855 dev->dev_addr = NULL;
3856}
3857
3858static int dev_addr_init(struct net_device *dev)
3859{
3860 unsigned char addr[MAX_ADDR_LEN];
3861 struct netdev_hw_addr *ha;
3862 int err;
3863
3864 /* rtnl_mutex must be held here */
3865
Jiri Pirko31278e72009-06-17 01:12:19 +00003866 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00003867 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00003868 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003869 NETDEV_HW_ADDR_T_LAN);
3870 if (!err) {
3871 /*
3872 * Get the first (previously created) address from the list
3873 * and set dev_addr pointer to this location.
3874 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003875 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003876 struct netdev_hw_addr, list);
3877 dev->dev_addr = ha->addr;
3878 }
3879 return err;
3880}
3881
3882/**
3883 * dev_addr_add - Add a device address
3884 * @dev: device
3885 * @addr: address to add
3886 * @addr_type: address type
3887 *
3888 * Add a device address to the device or increase the reference count if
3889 * it already exists.
3890 *
3891 * The caller must hold the rtnl_mutex.
3892 */
3893int dev_addr_add(struct net_device *dev, unsigned char *addr,
3894 unsigned char addr_type)
3895{
3896 int err;
3897
3898 ASSERT_RTNL();
3899
Jiri Pirko31278e72009-06-17 01:12:19 +00003900 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003901 if (!err)
3902 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3903 return err;
3904}
3905EXPORT_SYMBOL(dev_addr_add);
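/*
 * Example (sketch): adding and later releasing a secondary hardware
 * address under RTNL; "extra_addr" is an assumed buffer of
 * dev->addr_len valid bytes.
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, extra_addr, NETDEV_HW_ADDR_T_LAN);
 *	...
 *	dev_addr_del(dev, extra_addr, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 */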
3906
3907/**
3908 * dev_addr_del - Release a device address.
3909 * @dev: device
3910 * @addr: address to delete
3911 * @addr_type: address type
3912 *
3913 * Release reference to a device address and remove it from the device
3914 * if the reference count drops to zero.
3915 *
3916 * The caller must hold the rtnl_mutex.
3917 */
3918int dev_addr_del(struct net_device *dev, unsigned char *addr,
3919 unsigned char addr_type)
3920{
3921 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003922 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003923
3924 ASSERT_RTNL();
3925
Jiri Pirkoccffad252009-05-22 23:22:17 +00003926 /*
3927 * We can not remove the first address from the list because
3928 * dev->dev_addr points to that.
3929 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003930 ha = list_first_entry(&dev->dev_addrs.list,
3931 struct netdev_hw_addr, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003932 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3933 return -ENOENT;
3934
Jiri Pirko31278e72009-06-17 01:12:19 +00003935 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003936 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003937 if (!err)
3938 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3939 return err;
3940}
3941EXPORT_SYMBOL(dev_addr_del);
3942
3943/**
3944 * dev_addr_add_multiple - Add device addresses from another device
3945 * @to_dev: device to which addresses will be added
3946 * @from_dev: device from which addresses will be added
3947 * @addr_type: address type - 0 means type will be used from from_dev
3948 *
 3949 * Add the device addresses of one device to another.
 3950 *
3951 * The caller must hold the rtnl_mutex.
3952 */
3953int dev_addr_add_multiple(struct net_device *to_dev,
3954 struct net_device *from_dev,
3955 unsigned char addr_type)
3956{
3957 int err;
3958
3959 ASSERT_RTNL();
3960
3961 if (from_dev->addr_len != to_dev->addr_len)
3962 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003963 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003964 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003965 if (!err)
3966 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3967 return err;
3968}
3969EXPORT_SYMBOL(dev_addr_add_multiple);
3970
3971/**
3972 * dev_addr_del_multiple - Delete device addresses by another device
3973 * @to_dev: device where the addresses will be deleted
 3974 * @from_dev: device whose addresses will be deleted
 3975 * @addr_type: address type - 0 means type will be used from from_dev
 3976 *
 3977 * Deletes addresses in the to device that are listed in the from device.
3978 *
3979 * The caller must hold the rtnl_mutex.
3980 */
3981int dev_addr_del_multiple(struct net_device *to_dev,
3982 struct net_device *from_dev,
3983 unsigned char addr_type)
3984{
3985 ASSERT_RTNL();
3986
3987 if (from_dev->addr_len != to_dev->addr_len)
3988 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003989 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003990 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003991 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3992 return 0;
3993}
3994EXPORT_SYMBOL(dev_addr_del_multiple);
3995
Jiri Pirko31278e72009-06-17 01:12:19 +00003996/* multicast addresses handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00003997
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003998int __dev_addr_delete(struct dev_addr_list **list, int *count,
3999 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07004000{
4001 struct dev_addr_list *da;
4002
4003 for (; (da = *list) != NULL; list = &da->next) {
4004 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4005 alen == da->da_addrlen) {
4006 if (glbl) {
4007 int old_glbl = da->da_gusers;
4008 da->da_gusers = 0;
4009 if (old_glbl == 0)
4010 break;
4011 }
4012 if (--da->da_users)
4013 return 0;
4014
4015 *list = da->next;
4016 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004017 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07004018 return 0;
4019 }
4020 }
4021 return -ENOENT;
4022}
4023
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004024int __dev_addr_add(struct dev_addr_list **list, int *count,
4025 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07004026{
4027 struct dev_addr_list *da;
4028
4029 for (da = *list; da != NULL; da = da->next) {
4030 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4031 da->da_addrlen == alen) {
4032 if (glbl) {
4033 int old_glbl = da->da_gusers;
4034 da->da_gusers = 1;
4035 if (old_glbl)
4036 return 0;
4037 }
4038 da->da_users++;
4039 return 0;
4040 }
4041 }
4042
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08004043 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07004044 if (da == NULL)
4045 return -ENOMEM;
4046 memcpy(da->da_addr, addr, alen);
4047 da->da_addrlen = alen;
4048 da->da_users = 1;
4049 da->da_gusers = glbl ? 1 : 0;
4050 da->next = *list;
4051 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004052 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07004053 return 0;
4054}
4055
Patrick McHardy4417da62007-06-27 01:28:10 -07004056/**
4057 * dev_unicast_delete - Release secondary unicast address.
4058 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004059 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07004060 *
4061 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004062 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07004063 *
4064 * The caller must hold the rtnl_mutex.
4065 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00004066int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07004067{
4068 int err;
4069
4070 ASSERT_RTNL();
4071
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004072 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004073 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
4074 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004075 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004076 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004077 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004078 return err;
4079}
4080EXPORT_SYMBOL(dev_unicast_delete);
4081
4082/**
4083 * dev_unicast_add - add a secondary unicast address
4084 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07004085 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07004086 *
4087 * Add a secondary unicast address to the device or increase
4088 * the reference count if it already exists.
4089 *
4090 * The caller must hold the rtnl_mutex.
4091 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00004092int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07004093{
4094 int err;
4095
4096 ASSERT_RTNL();
4097
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004098 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004099 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4100 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004101 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004102 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004103 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004104 return err;
4105}
4106EXPORT_SYMBOL(dev_unicast_add);
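
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver that wants to receive frames for one extra MAC address could
 * pair dev_unicast_add() and dev_unicast_delete() as below.  "dev" and
 * "extra_mac" are assumptions for the example; both calls require the
 * rtnl semaphore.
 *
 *	static const unsigned char extra_mac[ETH_ALEN] = {
 *		0x00, 0x11, 0x22, 0x33, 0x44, 0x55
 *	};
 *
 *	rtnl_lock();
 *	err = dev_unicast_add(dev, (void *)extra_mac);
 *	...
 *	dev_unicast_delete(dev, (void *)extra_mac);
 *	rtnl_unlock();
 */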
4107
Chris Leeche83a2ea2008-01-31 16:53:23 -08004108int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4109 struct dev_addr_list **from, int *from_count)
4110{
4111 struct dev_addr_list *da, *next;
4112 int err = 0;
4113
4114 da = *from;
4115 while (da != NULL) {
4116 next = da->next;
4117 if (!da->da_synced) {
4118 err = __dev_addr_add(to, to_count,
4119 da->da_addr, da->da_addrlen, 0);
4120 if (err < 0)
4121 break;
4122 da->da_synced = 1;
4123 da->da_users++;
4124 } else if (da->da_users == 1) {
4125 __dev_addr_delete(to, to_count,
4126 da->da_addr, da->da_addrlen, 0);
4127 __dev_addr_delete(from, from_count,
4128 da->da_addr, da->da_addrlen, 0);
4129 }
4130 da = next;
4131 }
4132 return err;
4133}
Johannes Bergc4029082009-06-17 17:43:30 +02004134EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004135
4136void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4137 struct dev_addr_list **from, int *from_count)
4138{
4139 struct dev_addr_list *da, *next;
4140
4141 da = *from;
4142 while (da != NULL) {
4143 next = da->next;
4144 if (da->da_synced) {
4145 __dev_addr_delete(to, to_count,
4146 da->da_addr, da->da_addrlen, 0);
4147 da->da_synced = 0;
4148 __dev_addr_delete(from, from_count,
4149 da->da_addr, da->da_addrlen, 0);
4150 }
4151 da = next;
4152 }
4153}
Johannes Bergc4029082009-06-17 17:43:30 +02004154EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004155
4156/**
4157 * dev_unicast_sync - Synchronize device's unicast list to another device
4158 * @to: destination device
4159 * @from: source device
4160 *
4161 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004162 * addresses that have no users left. The source device must be
 4163 * locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08004164 *
4165 * This function is intended to be called from the dev->set_rx_mode
4166 * function of layered software devices.
4167 */
4168int dev_unicast_sync(struct net_device *to, struct net_device *from)
4169{
4170 int err = 0;
4171
Jiri Pirkoccffad252009-05-22 23:22:17 +00004172 if (to->addr_len != from->addr_len)
4173 return -EINVAL;
4174
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004175 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004176 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004177 if (!err)
4178 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004179 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004180 return err;
4181}
4182EXPORT_SYMBOL(dev_unicast_sync);
4183
4184/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004185 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004186 * @to: destination device
4187 * @from: source device
4188 *
4189 * Remove all addresses that were added to the destination device by
4190 * dev_unicast_sync(). This function is intended to be called from the
4191 * dev->stop function of layered software devices.
4192 */
4193void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4194{
Jiri Pirkoccffad252009-05-22 23:22:17 +00004195 if (to->addr_len != from->addr_len)
4196 return;
4197
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004198 netif_addr_lock_bh(from);
4199 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004200 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004201 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004202 netif_addr_unlock(to);
4203 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004204}
4205EXPORT_SYMBOL(dev_unicast_unsync);
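
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * layered device ("vdev" on top of priv->lowerdev, both names assumed)
 * would typically call the sync/unsync pair from its ndo_set_rx_mode
 * and ndo_stop callbacks, roughly like this:
 *
 *	static void example_set_rx_mode(struct net_device *vdev)
 *	{
 *		struct example_priv *priv = netdev_priv(vdev);
 *
 *		dev_unicast_sync(priv->lowerdev, vdev);
 *	}
 *
 *	static int example_stop(struct net_device *vdev)
 *	{
 *		struct example_priv *priv = netdev_priv(vdev);
 *
 *		dev_unicast_unsync(priv->lowerdev, vdev);
 *		return 0;
 *	}
 */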
4206
Jiri Pirkoccffad252009-05-22 23:22:17 +00004207static void dev_unicast_flush(struct net_device *dev)
4208{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004209 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004210 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004211 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004212}
4213
4214static void dev_unicast_init(struct net_device *dev)
4215{
Jiri Pirko31278e72009-06-17 01:12:19 +00004216 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004217}
4218
4219
Denis Cheng12972622007-07-18 02:12:56 -07004220static void __dev_addr_discard(struct dev_addr_list **list)
4221{
4222 struct dev_addr_list *tmp;
4223
4224 while (*list != NULL) {
4225 tmp = *list;
4226 *list = tmp->next;
4227 if (tmp->da_users > tmp->da_gusers)
4228 printk("__dev_addr_discard: address leakage! "
4229 "da_users=%d\n", tmp->da_users);
4230 kfree(tmp);
4231 }
4232}
4233
Denis Cheng26cc2522007-07-18 02:12:03 -07004234static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004235{
David S. Millerb9e40852008-07-15 00:15:08 -07004236 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004237
Denis Cheng456ad752007-07-18 02:10:54 -07004238 __dev_addr_discard(&dev->mc_list);
4239 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004240
David S. Millerb9e40852008-07-15 00:15:08 -07004241 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004242}
4243
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004244/**
4245 * dev_get_flags - get flags reported to userspace
4246 * @dev: device
4247 *
4248 * Get the combination of flag bits exported through APIs to userspace.
4249 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250unsigned dev_get_flags(const struct net_device *dev)
4251{
4252 unsigned flags;
4253
4254 flags = (dev->flags & ~(IFF_PROMISC |
4255 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004256 IFF_RUNNING |
4257 IFF_LOWER_UP |
4258 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259 (dev->gflags & (IFF_PROMISC |
4260 IFF_ALLMULTI));
4261
Stefan Rompfb00055a2006-03-20 17:09:11 -08004262 if (netif_running(dev)) {
4263 if (netif_oper_up(dev))
4264 flags |= IFF_RUNNING;
4265 if (netif_carrier_ok(dev))
4266 flags |= IFF_LOWER_UP;
4267 if (netif_dormant(dev))
4268 flags |= IFF_DORMANT;
4269 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270
4271 return flags;
4272}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004273EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004275/**
4276 * dev_change_flags - change device settings
4277 * @dev: device
4278 * @flags: device state flags
4279 *
4280 * Change settings on device based state flags. The flags are
4281 * in the userspace exported format.
4282 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283int dev_change_flags(struct net_device *dev, unsigned flags)
4284{
Thomas Graf7c355f52007-06-05 16:03:03 -07004285 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286 int old_flags = dev->flags;
4287
Patrick McHardy24023452007-07-14 18:51:31 -07004288 ASSERT_RTNL();
4289
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290 /*
4291 * Set the flags on our device.
4292 */
4293
4294 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4295 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4296 IFF_AUTOMEDIA)) |
4297 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4298 IFF_ALLMULTI));
4299
4300 /*
4301 * Load in the correct multicast list now the flags have changed.
4302 */
4303
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004304 if ((old_flags ^ flags) & IFF_MULTICAST)
4305 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004306
Patrick McHardy4417da62007-06-27 01:28:10 -07004307 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308
4309 /*
 4310 * Have we downed the interface? We handle IFF_UP ourselves
4311 * according to user attempts to set it, rather than blindly
4312 * setting it.
4313 */
4314
4315 ret = 0;
4316 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4317 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4318
4319 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004320 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321 }
4322
4323 if (dev->flags & IFF_UP &&
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004324 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004326 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004327
4328 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004329 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4330
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331 dev->gflags ^= IFF_PROMISC;
4332 dev_set_promiscuity(dev, inc);
4333 }
4334
4335 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 4336 is important. Some (broken) drivers set IFF_PROMISC when
 4337 IFF_ALLMULTI is requested, without asking us and without reporting it.
4338 */
4339 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004340 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4341
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342 dev->gflags ^= IFF_ALLMULTI;
4343 dev_set_allmulti(dev, inc);
4344 }
4345
Thomas Graf7c355f52007-06-05 16:03:03 -07004346 /* Exclude state transition flags, already notified */
4347 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4348 if (changes)
4349 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350
4351 return ret;
4352}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004353EXPORT_SYMBOL(dev_change_flags);
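
/*
 * Illustrative sketch, not part of the original file: since
 * dev_change_flags() takes flags in the userspace-exported format, a
 * caller that only wants to toggle one bit usually starts from
 * dev_get_flags().  "dev" is an assumed net_device pointer; the rtnl
 * semaphore must be held.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */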
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004355/**
4356 * dev_set_mtu - Change maximum transfer unit
4357 * @dev: device
4358 * @new_mtu: new transfer unit
4359 *
4360 * Change the maximum transfer size of the network device.
4361 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362int dev_set_mtu(struct net_device *dev, int new_mtu)
4363{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004364 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004365 int err;
4366
4367 if (new_mtu == dev->mtu)
4368 return 0;
4369
 4370 /* MTU must not be negative. */
4371 if (new_mtu < 0)
4372 return -EINVAL;
4373
4374 if (!netif_device_present(dev))
4375 return -ENODEV;
4376
4377 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004378 if (ops->ndo_change_mtu)
4379 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004380 else
4381 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004382
Linus Torvalds1da177e2005-04-16 15:20:36 -07004383 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004384 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385 return err;
4386}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004387EXPORT_SYMBOL(dev_set_mtu);
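
/*
 * Illustrative sketch, not part of the original file: in-kernel callers
 * normally change the MTU under the rtnl semaphore, the same way the
 * SIOCSIFMTU path below does.  "dev" and the value 1500 are assumptions
 * for the example.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 1500);
 *	if (err)
 *		printk(KERN_WARNING "example: MTU change failed: %d\n", err);
 *	rtnl_unlock();
 */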
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004389/**
4390 * dev_set_mac_address - Change Media Access Control Address
4391 * @dev: device
4392 * @sa: new address
4393 *
4394 * Change the hardware (MAC) address of the device
4395 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4397{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004398 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004399 int err;
4400
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004401 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004402 return -EOPNOTSUPP;
4403 if (sa->sa_family != dev->type)
4404 return -EINVAL;
4405 if (!netif_device_present(dev))
4406 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004407 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004409 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410 return err;
4411}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004412EXPORT_SYMBOL(dev_set_mac_address);
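
/*
 * Illustrative sketch, not part of the original file: the new address is
 * passed in a struct sockaddr whose family must match dev->type.  "dev"
 * and "new_mac" (an assumed buffer of at least dev->addr_len bytes) are
 * placeholders for the example; the rtnl semaphore must be held.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */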
Linus Torvalds1da177e2005-04-16 15:20:36 -07004413
4414/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00004415 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004417static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418{
4419 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00004420 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421
4422 if (!dev)
4423 return -ENODEV;
4424
4425 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004426 case SIOCGIFFLAGS: /* Get interface flags */
4427 ifr->ifr_flags = (short) dev_get_flags(dev);
4428 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004429
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004430 case SIOCGIFMETRIC: /* Get the metric on the interface
4431 (currently unused) */
4432 ifr->ifr_metric = 0;
4433 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004435 case SIOCGIFMTU: /* Get the MTU of a device */
4436 ifr->ifr_mtu = dev->mtu;
4437 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004439 case SIOCGIFHWADDR:
4440 if (!dev->addr_len)
4441 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4442 else
4443 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4444 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4445 ifr->ifr_hwaddr.sa_family = dev->type;
4446 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004447
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004448 case SIOCGIFSLAVE:
4449 err = -EINVAL;
4450 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004451
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004452 case SIOCGIFMAP:
4453 ifr->ifr_map.mem_start = dev->mem_start;
4454 ifr->ifr_map.mem_end = dev->mem_end;
4455 ifr->ifr_map.base_addr = dev->base_addr;
4456 ifr->ifr_map.irq = dev->irq;
4457 ifr->ifr_map.dma = dev->dma;
4458 ifr->ifr_map.port = dev->if_port;
4459 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004460
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004461 case SIOCGIFINDEX:
4462 ifr->ifr_ifindex = dev->ifindex;
4463 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004464
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004465 case SIOCGIFTXQLEN:
4466 ifr->ifr_qlen = dev->tx_queue_len;
4467 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004468
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004469 default:
4470 /* dev_ioctl() should ensure this case
4471 * is never reached
4472 */
4473 WARN_ON(1);
4474 err = -EINVAL;
4475 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004476
4477 }
4478 return err;
4479}
4480
4481/*
4482 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4483 */
4484static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4485{
4486 int err;
4487 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004488 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004489
4490 if (!dev)
4491 return -ENODEV;
4492
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004493 ops = dev->netdev_ops;
4494
Jeff Garzik14e3e072007-10-08 00:06:32 -07004495 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004496 case SIOCSIFFLAGS: /* Set interface flags */
4497 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004498
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004499 case SIOCSIFMETRIC: /* Set the metric on the interface
4500 (currently unused) */
4501 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004502
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004503 case SIOCSIFMTU: /* Set the MTU of a device */
4504 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004505
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004506 case SIOCSIFHWADDR:
4507 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004508
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004509 case SIOCSIFHWBROADCAST:
4510 if (ifr->ifr_hwaddr.sa_family != dev->type)
4511 return -EINVAL;
4512 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4513 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4514 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4515 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004516
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004517 case SIOCSIFMAP:
4518 if (ops->ndo_set_config) {
4519 if (!netif_device_present(dev))
4520 return -ENODEV;
4521 return ops->ndo_set_config(dev, &ifr->ifr_map);
4522 }
4523 return -EOPNOTSUPP;
4524
4525 case SIOCADDMULTI:
4526 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4527 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4528 return -EINVAL;
4529 if (!netif_device_present(dev))
4530 return -ENODEV;
4531 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4532 dev->addr_len, 1);
4533
4534 case SIOCDELMULTI:
4535 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4536 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4537 return -EINVAL;
4538 if (!netif_device_present(dev))
4539 return -ENODEV;
4540 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4541 dev->addr_len, 1);
4542
4543 case SIOCSIFTXQLEN:
4544 if (ifr->ifr_qlen < 0)
4545 return -EINVAL;
4546 dev->tx_queue_len = ifr->ifr_qlen;
4547 return 0;
4548
4549 case SIOCSIFNAME:
4550 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4551 return dev_change_name(dev, ifr->ifr_newname);
4552
4553 /*
4554 * Unknown or private ioctl
4555 */
4556 default:
4557 if ((cmd >= SIOCDEVPRIVATE &&
4558 cmd <= SIOCDEVPRIVATE + 15) ||
4559 cmd == SIOCBONDENSLAVE ||
4560 cmd == SIOCBONDRELEASE ||
4561 cmd == SIOCBONDSETHWADDR ||
4562 cmd == SIOCBONDSLAVEINFOQUERY ||
4563 cmd == SIOCBONDINFOQUERY ||
4564 cmd == SIOCBONDCHANGEACTIVE ||
4565 cmd == SIOCGMIIPHY ||
4566 cmd == SIOCGMIIREG ||
4567 cmd == SIOCSMIIREG ||
4568 cmd == SIOCBRADDIF ||
4569 cmd == SIOCBRDELIF ||
4570 cmd == SIOCSHWTSTAMP ||
4571 cmd == SIOCWANDEV) {
4572 err = -EOPNOTSUPP;
4573 if (ops->ndo_do_ioctl) {
4574 if (netif_device_present(dev))
4575 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4576 else
4577 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004579 } else
4580 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581
4582 }
4583 return err;
4584}
4585
4586/*
4587 * This function handles all "interface"-type I/O control requests. The actual
4588 * 'doing' part of this is dev_ifsioc above.
4589 */
4590
4591/**
4592 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004593 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004594 * @cmd: command to issue
4595 * @arg: pointer to a struct ifreq in user space
4596 *
4597 * Issue ioctl functions to devices. This is normally called by the
4598 * user space syscall interfaces but can sometimes be useful for
4599 * other purposes. The return value is the return from the syscall if
4600 * positive or a negative errno code on error.
4601 */
4602
Eric W. Biederman881d9662007-09-17 11:56:21 -07004603int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004604{
4605 struct ifreq ifr;
4606 int ret;
4607 char *colon;
4608
4609 /* One special case: SIOCGIFCONF takes ifconf argument
4610 and requires shared lock, because it sleeps writing
4611 to user space.
4612 */
4613
4614 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004615 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004616 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004617 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004618 return ret;
4619 }
4620 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004621 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004622
4623 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4624 return -EFAULT;
4625
4626 ifr.ifr_name[IFNAMSIZ-1] = 0;
4627
4628 colon = strchr(ifr.ifr_name, ':');
4629 if (colon)
4630 *colon = 0;
4631
4632 /*
4633 * See which interface the caller is talking about.
4634 */
4635
4636 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004637 /*
4638 * These ioctl calls:
4639 * - can be done by all.
4640 * - atomic and do not require locking.
4641 * - return a value
4642 */
4643 case SIOCGIFFLAGS:
4644 case SIOCGIFMETRIC:
4645 case SIOCGIFMTU:
4646 case SIOCGIFHWADDR:
4647 case SIOCGIFSLAVE:
4648 case SIOCGIFMAP:
4649 case SIOCGIFINDEX:
4650 case SIOCGIFTXQLEN:
4651 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004652 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004653 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004654 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004655 if (!ret) {
4656 if (colon)
4657 *colon = ':';
4658 if (copy_to_user(arg, &ifr,
4659 sizeof(struct ifreq)))
4660 ret = -EFAULT;
4661 }
4662 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004664 case SIOCETHTOOL:
4665 dev_load(net, ifr.ifr_name);
4666 rtnl_lock();
4667 ret = dev_ethtool(net, &ifr);
4668 rtnl_unlock();
4669 if (!ret) {
4670 if (colon)
4671 *colon = ':';
4672 if (copy_to_user(arg, &ifr,
4673 sizeof(struct ifreq)))
4674 ret = -EFAULT;
4675 }
4676 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004678 /*
4679 * These ioctl calls:
4680 * - require superuser power.
4681 * - require strict serialization.
4682 * - return a value
4683 */
4684 case SIOCGMIIPHY:
4685 case SIOCGMIIREG:
4686 case SIOCSIFNAME:
4687 if (!capable(CAP_NET_ADMIN))
4688 return -EPERM;
4689 dev_load(net, ifr.ifr_name);
4690 rtnl_lock();
4691 ret = dev_ifsioc(net, &ifr, cmd);
4692 rtnl_unlock();
4693 if (!ret) {
4694 if (colon)
4695 *colon = ':';
4696 if (copy_to_user(arg, &ifr,
4697 sizeof(struct ifreq)))
4698 ret = -EFAULT;
4699 }
4700 return ret;
4701
4702 /*
4703 * These ioctl calls:
4704 * - require superuser power.
4705 * - require strict serialization.
4706 * - do not return a value
4707 */
4708 case SIOCSIFFLAGS:
4709 case SIOCSIFMETRIC:
4710 case SIOCSIFMTU:
4711 case SIOCSIFMAP:
4712 case SIOCSIFHWADDR:
4713 case SIOCSIFSLAVE:
4714 case SIOCADDMULTI:
4715 case SIOCDELMULTI:
4716 case SIOCSIFHWBROADCAST:
4717 case SIOCSIFTXQLEN:
4718 case SIOCSMIIREG:
4719 case SIOCBONDENSLAVE:
4720 case SIOCBONDRELEASE:
4721 case SIOCBONDSETHWADDR:
4722 case SIOCBONDCHANGEACTIVE:
4723 case SIOCBRADDIF:
4724 case SIOCBRDELIF:
4725 case SIOCSHWTSTAMP:
4726 if (!capable(CAP_NET_ADMIN))
4727 return -EPERM;
4728 /* fall through */
4729 case SIOCBONDSLAVEINFOQUERY:
4730 case SIOCBONDINFOQUERY:
4731 dev_load(net, ifr.ifr_name);
4732 rtnl_lock();
4733 ret = dev_ifsioc(net, &ifr, cmd);
4734 rtnl_unlock();
4735 return ret;
4736
4737 case SIOCGIFMEM:
4738 /* Get the per device memory space. We can add this but
4739 * currently do not support it */
4740 case SIOCSIFMEM:
4741 /* Set the per device memory buffer space.
4742 * Not applicable in our case */
4743 case SIOCSIFLINK:
4744 return -EINVAL;
4745
4746 /*
4747 * Unknown or private ioctl.
4748 */
4749 default:
4750 if (cmd == SIOCWANDEV ||
4751 (cmd >= SIOCDEVPRIVATE &&
4752 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004753 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004755 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004756 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004757 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004758 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004759 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004760 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004761 }
4762 /* Take care of Wireless Extensions */
4763 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4764 return wext_handle_ioctl(net, &ifr, cmd, arg);
4765 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004766 }
4767}
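
/*
 * Illustrative sketch, not part of the original file: from userspace the
 * ioctls handled above are issued on any socket with a struct ifreq.
 * The interface name "eth0" is an assumption for the example.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu = %d\n", ifr.ifr_mtu);
 */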
4768
4769
4770/**
4771 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004772 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004773 *
4774 * Returns a suitable unique value for a new device interface
4775 * number. The caller must hold the rtnl semaphore or the
4776 * dev_base_lock to be sure it remains unique.
4777 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004778static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004779{
4780 static int ifindex;
4781 for (;;) {
4782 if (++ifindex <= 0)
4783 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004784 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004785 return ifindex;
4786 }
4787}
4788
Linus Torvalds1da177e2005-04-16 15:20:36 -07004789/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004790static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004791
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004792static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004793{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004794 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795}
4796
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004797static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004798{
Krishna Kumare93737b2009-12-08 22:26:02 +00004799 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004800
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004801 BUG_ON(dev_boot_phase);
4802 ASSERT_RTNL();
4803
Krishna Kumare93737b2009-12-08 22:26:02 +00004804 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004805 /* Some devices call unregister without ever having
Krishna Kumare93737b2009-12-08 22:26:02 +00004806 * registered, as an initialization unwind. Remove those
 4807 * devices and proceed with the remaining ones.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004808 */
4809 if (dev->reg_state == NETREG_UNINITIALIZED) {
4810 pr_debug("unregister_netdevice: device %s/%p never "
4811 "was registered\n", dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004812
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004813 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00004814 list_del(&dev->unreg_list);
4815 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004816 }
4817
4818 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4819
4820 /* If device is running, close it first. */
4821 dev_close(dev);
4822
4823 /* And unlink it from device chain. */
4824 unlist_netdevice(dev);
4825
4826 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004827 }
4828
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004829 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004830
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004831 list_for_each_entry(dev, head, unreg_list) {
4832 /* Shutdown queueing discipline. */
4833 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004834
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004835
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004836 /* Notify protocols that we are about to destroy
 4837 this device. They should clean up all of their state.
4838 */
4839 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4840
4841 /*
4842 * Flush the unicast and multicast chains
4843 */
4844 dev_unicast_flush(dev);
4845 dev_addr_discard(dev);
4846
4847 if (dev->netdev_ops->ndo_uninit)
4848 dev->netdev_ops->ndo_uninit(dev);
4849
4850 /* Notifier chain MUST detach us from master device. */
4851 WARN_ON(dev->master);
4852
4853 /* Remove entries from kobject tree */
4854 netdev_unregister_kobject(dev);
4855 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004856
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004857 /* Process any work delayed until the end of the batch */
4858 dev = list_entry(head->next, struct net_device, unreg_list);
4859 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4860
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004861 synchronize_net();
4862
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004863 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004864 dev_put(dev);
4865}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004866
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004867static void rollback_registered(struct net_device *dev)
4868{
4869 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004870
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004871 list_add(&dev->unreg_list, &single);
4872 rollback_registered_many(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004873}
4874
David S. Millere8a04642008-07-17 00:34:19 -07004875static void __netdev_init_queue_locks_one(struct net_device *dev,
4876 struct netdev_queue *dev_queue,
4877 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004878{
4879 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004880 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004881 dev_queue->xmit_lock_owner = -1;
4882}
4883
4884static void netdev_init_queue_locks(struct net_device *dev)
4885{
David S. Millere8a04642008-07-17 00:34:19 -07004886 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4887 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004888}
4889
Herbert Xub63365a2008-10-23 01:11:29 -07004890unsigned long netdev_fix_features(unsigned long features, const char *name)
4891{
4892 /* Fix illegal SG+CSUM combinations. */
4893 if ((features & NETIF_F_SG) &&
4894 !(features & NETIF_F_ALL_CSUM)) {
4895 if (name)
4896 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4897 "checksum feature.\n", name);
4898 features &= ~NETIF_F_SG;
4899 }
4900
4901 /* TSO requires that SG is present as well. */
4902 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4903 if (name)
4904 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4905 "SG feature.\n", name);
4906 features &= ~NETIF_F_TSO;
4907 }
4908
4909 if (features & NETIF_F_UFO) {
4910 if (!(features & NETIF_F_GEN_CSUM)) {
4911 if (name)
4912 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4913 "since no NETIF_F_HW_CSUM feature.\n",
4914 name);
4915 features &= ~NETIF_F_UFO;
4916 }
4917
4918 if (!(features & NETIF_F_SG)) {
4919 if (name)
4920 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4921 "since no NETIF_F_SG feature.\n", name);
4922 features &= ~NETIF_F_UFO;
4923 }
4924 }
4925
4926 return features;
4927}
4928EXPORT_SYMBOL(netdev_fix_features);
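
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver can run its requested feature set through netdev_fix_features()
 * before registration so that illegal combinations (for example TSO
 * without SG) are dropped with a log message instead of being silently
 * accepted.  "dev" is an assumed net_device the driver is setting up.
 *
 *	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 *	dev->features = netdev_fix_features(dev->features, dev->name);
 */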
4929
Linus Torvalds1da177e2005-04-16 15:20:36 -07004930/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08004931 * netif_stacked_transfer_operstate - transfer operstate
4932 * @rootdev: the root or lower level device to transfer state from
4933 * @dev: the device to transfer operstate to
4934 *
4935 * Transfer operational state from root to device. This is normally
4936 * called when a stacking relationship exists between the root
 4937 * device and the device (a leaf device).
4938 */
4939void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4940 struct net_device *dev)
4941{
4942 if (rootdev->operstate == IF_OPER_DORMANT)
4943 netif_dormant_on(dev);
4944 else
4945 netif_dormant_off(dev);
4946
4947 if (netif_carrier_ok(rootdev)) {
4948 if (!netif_carrier_ok(dev))
4949 netif_carrier_on(dev);
4950 } else {
4951 if (netif_carrier_ok(dev))
4952 netif_carrier_off(dev);
4953 }
4954}
4955EXPORT_SYMBOL(netif_stacked_transfer_operstate);
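
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * stacked driver would typically call this from a netdevice notifier so
 * that its virtual device ("vdev", an assumed pointer) follows the
 * carrier and dormant state of the lower device.
 *
 *	static int example_notify(struct notifier_block *nb,
 *				  unsigned long event, void *ptr)
 *	{
 *		struct net_device *lowerdev = ptr;
 *
 *		if (event == NETDEV_CHANGE)
 *			netif_stacked_transfer_operstate(lowerdev, vdev);
 *		return NOTIFY_DONE;
 *	}
 */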
4956
4957/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004958 * register_netdevice - register a network device
4959 * @dev: device to register
4960 *
4961 * Take a completed network device structure and add it to the kernel
4962 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4963 * chain. 0 is returned on success. A negative errno code is returned
4964 * on a failure to set up the device, or if the name is a duplicate.
4965 *
4966 * Callers must hold the rtnl semaphore. You may want
4967 * register_netdev() instead of this.
4968 *
4969 * BUGS:
4970 * The locking appears insufficient to guarantee two parallel registers
4971 * will not get the same name.
4972 */
4973
4974int register_netdevice(struct net_device *dev)
4975{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004976 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004977 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004978
4979 BUG_ON(dev_boot_phase);
4980 ASSERT_RTNL();
4981
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004982 might_sleep();
4983
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984 /* When net_device's are persistent, this will be fatal. */
4985 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004986 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987
David S. Millerf1f28aa2008-07-15 00:08:33 -07004988 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004989 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004990 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004991
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992 dev->iflink = -1;
4993
4994 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004995 if (dev->netdev_ops->ndo_init) {
4996 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004997 if (ret) {
4998 if (ret > 0)
4999 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005000 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005001 }
5002 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005003
Octavian Purdilad9031022009-11-18 02:36:59 +00005004 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
5005 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005006 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005007
Eric W. Biederman881d9662007-09-17 11:56:21 -07005008 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009 if (dev->iflink == -1)
5010 dev->iflink = dev->ifindex;
5011
Stephen Hemmingerd212f872007-06-27 00:47:37 -07005012 /* Fix illegal checksum combinations */
5013 if ((dev->features & NETIF_F_HW_CSUM) &&
5014 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5015 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5016 dev->name);
5017 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5018 }
5019
5020 if ((dev->features & NETIF_F_NO_CSUM) &&
5021 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5022 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5023 dev->name);
5024 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5025 }
5026
Herbert Xub63365a2008-10-23 01:11:29 -07005027 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07005029 /* Enable software GSO if SG is supported. */
5030 if (dev->features & NETIF_F_SG)
5031 dev->features |= NETIF_F_GSO;
5032
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005033 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005034
5035 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5036 ret = notifier_to_errno(ret);
5037 if (ret)
5038 goto err_uninit;
5039
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005040 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005041 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005042 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005043 dev->reg_state = NETREG_REGISTERED;
5044
Linus Torvalds1da177e2005-04-16 15:20:36 -07005045 /*
5046 * Default initial state at registry is that the
5047 * device is present.
5048 */
5049
5050 set_bit(__LINK_STATE_PRESENT, &dev->state);
5051
Linus Torvalds1da177e2005-04-16 15:20:36 -07005052 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005053 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005054 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055
 5056 /* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005057 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005058 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005059 if (ret) {
5060 rollback_registered(dev);
5061 dev->reg_state = NETREG_UNREGISTERED;
5062 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005063 /*
5064 * Prevent userspace races by waiting until the network
 5065 * device is fully set up before sending notifications.
5066 */
5067 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068
5069out:
5070 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005071
5072err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005073 if (dev->netdev_ops->ndo_uninit)
5074 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005075 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005077EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078
5079/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005080 * init_dummy_netdev - init a dummy network device for NAPI
5081 * @dev: device to init
5082 *
 5083 * This takes a network device structure and initializes the minimum
 5084 * number of fields so it can be used to schedule NAPI polls without
5085 * registering a full blown interface. This is to be used by drivers
5086 * that need to tie several hardware interfaces to a single NAPI
5087 * poll scheduler due to HW limitations.
5088 */
5089int init_dummy_netdev(struct net_device *dev)
5090{
5091 /* Clear everything. Note we don't initialize spinlocks
 5092 * as they aren't supposed to be taken by any of the
5093 * NAPI code and this dummy netdev is supposed to be
5094 * only ever used for NAPI polls
5095 */
5096 memset(dev, 0, sizeof(struct net_device));
5097
5098 /* make sure we BUG if trying to hit standard
5099 * register/unregister code path
5100 */
5101 dev->reg_state = NETREG_DUMMY;
5102
5103 /* initialize the ref count */
5104 atomic_set(&dev->refcnt, 1);
5105
5106 /* NAPI wants this */
5107 INIT_LIST_HEAD(&dev->napi_list);
5108
5109 /* a dummy interface is started by default */
5110 set_bit(__LINK_STATE_PRESENT, &dev->state);
5111 set_bit(__LINK_STATE_START, &dev->state);
5112
5113 return 0;
5114}
5115EXPORT_SYMBOL_GPL(init_dummy_netdev);
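
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver that needs one NAPI context shared by several hardware
 * interfaces could embed a dummy netdev in its private state.  The
 * "example_hw" structure, "hw" pointer, "example_poll" callback and the
 * weight of 64 are assumptions for the example.
 *
 *	struct example_hw {
 *		struct net_device napi_dev;
 *		struct napi_struct napi;
 *	};
 *
 *	init_dummy_netdev(&hw->napi_dev);
 *	netif_napi_add(&hw->napi_dev, &hw->napi, example_poll, 64);
 *	napi_enable(&hw->napi);
 */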
5116
5117
5118/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005119 * register_netdev - register a network device
5120 * @dev: device to register
5121 *
5122 * Take a completed network device structure and add it to the kernel
5123 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5124 * chain. 0 is returned on success. A negative errno code is returned
5125 * on a failure to set up the device, or if the name is a duplicate.
5126 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005127 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005128 * and expands the device name if you passed a format string to
5129 * alloc_netdev.
5130 */
5131int register_netdev(struct net_device *dev)
5132{
5133 int err;
5134
5135 rtnl_lock();
5136
5137 /*
5138 * If the name is a format string the caller wants us to do a
5139 * name allocation.
5140 */
5141 if (strchr(dev->name, '%')) {
5142 err = dev_alloc_name(dev, dev->name);
5143 if (err < 0)
5144 goto out;
5145 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005146
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147 err = register_netdevice(dev);
5148out:
5149 rtnl_unlock();
5150 return err;
5151}
5152EXPORT_SYMBOL(register_netdev);
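
/*
 * Illustrative sketch, not part of the original file: a minimal driver
 * registration path using the name expansion described above.  The
 * private struct, the netdev_ops table and the "exm%d" format string
 * are assumptions for the example.
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct example_priv), "exm%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &example_netdev_ops;
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */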
5153
5154/*
5155 * netdev_wait_allrefs - wait until all references are gone.
5156 *
5157 * This is called when unregistering network devices.
5158 *
5159 * Any protocol or device that holds a reference should register
5160 * for netdevice notification, and cleanup and put back the
5161 * reference if they receive an UNREGISTER event.
5162 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005163 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164 */
5165static void netdev_wait_allrefs(struct net_device *dev)
5166{
5167 unsigned long rebroadcast_time, warning_time;
5168
Eric Dumazete014deb2009-11-17 05:59:21 +00005169 linkwatch_forget_dev(dev);
5170
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171 rebroadcast_time = warning_time = jiffies;
5172 while (atomic_read(&dev->refcnt) != 0) {
5173 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005174 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005175
5176 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005177 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005178 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
Octavian Purdila395264d2009-11-16 13:49:35 +00005179 * should have already handled it the first time */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180
5181 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5182 &dev->state)) {
5183 /* We must not have linkwatch events
5184 * pending on unregister. If this
5185 * happens, we simply run the queue
5186 * unscheduled, resulting in a noop
5187 * for this device.
5188 */
5189 linkwatch_run_queue();
5190 }
5191
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005192 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005193
5194 rebroadcast_time = jiffies;
5195 }
5196
5197 msleep(250);
5198
5199 if (time_after(jiffies, warning_time + 10 * HZ)) {
5200 printk(KERN_EMERG "unregister_netdevice: "
5201 "waiting for %s to become free. Usage "
5202 "count = %d\n",
5203 dev->name, atomic_read(&dev->refcnt));
5204 warning_time = jiffies;
5205 }
5206 }
5207}
5208
5209/* The sequence is:
5210 *
5211 * rtnl_lock();
5212 * ...
5213 * register_netdevice(x1);
5214 * register_netdevice(x2);
5215 * ...
5216 * unregister_netdevice(y1);
5217 * unregister_netdevice(y2);
5218 * ...
5219 * rtnl_unlock();
5220 * free_netdev(y1);
5221 * free_netdev(y2);
5222 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005223 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005224 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005225 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 * without deadlocking with linkwatch via keventd.
5227 * 2) Since we run with the RTNL semaphore not held, we can sleep
5228 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005229 *
5230 * We must not return until all unregister events added during
5231 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005232 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005233void netdev_run_todo(void)
5234{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005235 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005236
Linus Torvalds1da177e2005-04-16 15:20:36 -07005237 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005238 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005239
5240 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005241
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242 while (!list_empty(&list)) {
5243 struct net_device *dev
5244 = list_entry(list.next, struct net_device, todo_list);
5245 list_del(&dev->todo_list);
5246
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005247 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005248 printk(KERN_ERR "network todo '%s' but state %d\n",
5249 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005250 dump_stack();
5251 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005253
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005254 dev->reg_state = NETREG_UNREGISTERED;
5255
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005256 on_each_cpu(flush_backlog, dev, 1);
5257
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005258 netdev_wait_allrefs(dev);
5259
5260 /* paranoia */
5261 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005262 WARN_ON(dev->ip_ptr);
5263 WARN_ON(dev->ip6_ptr);
5264 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005265
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005266 if (dev->destructor)
5267 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005268
5269 /* Free network device */
5270 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005272}
5273
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005274/**
Eric Dumazetd83345a2009-11-16 03:36:51 +00005275 * dev_txq_stats_fold - fold tx_queues stats
5276 * @dev: device to get statistics from
5277 * @stats: struct net_device_stats to hold results
5278 */
5279void dev_txq_stats_fold(const struct net_device *dev,
5280 struct net_device_stats *stats)
5281{
5282 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5283 unsigned int i;
5284 struct netdev_queue *txq;
5285
5286 for (i = 0; i < dev->num_tx_queues; i++) {
5287 txq = netdev_get_tx_queue(dev, i);
5288 tx_bytes += txq->tx_bytes;
5289 tx_packets += txq->tx_packets;
5290 tx_dropped += txq->tx_dropped;
5291 }
5292 if (tx_bytes || tx_packets || tx_dropped) {
5293 stats->tx_bytes = tx_bytes;
5294 stats->tx_packets = tx_packets;
5295 stats->tx_dropped = tx_dropped;
5296 }
5297}
5298EXPORT_SYMBOL(dev_txq_stats_fold);
5299
5300/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005301 * dev_get_stats - get network device statistics
5302 * @dev: device to get statistics from
5303 *
5304 * Get network statistics from device. The device driver may provide
5305 * its own method by setting dev->netdev_ops->get_stats; otherwise
5306 * the internal statistics structure is used.
5307 */
5308const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005309{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005310 const struct net_device_ops *ops = dev->netdev_ops;
5311
5312 if (ops->ndo_get_stats)
5313 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005314
Eric Dumazetd83345a2009-11-16 03:36:51 +00005315 dev_txq_stats_fold(dev, &dev->stats);
5316 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07005317}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005318EXPORT_SYMBOL(dev_get_stats);
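
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver that keeps its own counters can override the default by
 * providing ndo_get_stats; otherwise dev_get_stats() falls back to
 * dev->stats plus the per-queue tx counters folded above.  The private
 * struct and counter names are assumptions.
 *
 *	static struct net_device_stats *example_get_stats(struct net_device *dev)
 *	{
 *		struct example_priv *priv = netdev_priv(dev);
 *
 *		dev->stats.rx_dropped = priv->rx_fifo_overruns;
 *		return &dev->stats;
 *	}
 */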
Rusty Russellc45d2862007-03-28 14:29:08 -07005319
David S. Millerdc2b4842008-07-08 17:18:23 -07005320static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005321 struct netdev_queue *queue,
5322 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005323{
David S. Millerdc2b4842008-07-08 17:18:23 -07005324 queue->dev = dev;
5325}
5326
David S. Millerbb949fb2008-07-08 16:55:56 -07005327static void netdev_init_queues(struct net_device *dev)
5328{
David S. Millere8a04642008-07-17 00:34:19 -07005329 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5330 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005331 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005332}
5333
Linus Torvalds1da177e2005-04-16 15:20:36 -07005334/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005335 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005336 * @sizeof_priv: size of private data to allocate space for
5337 * @name: device name format string
5338 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005339 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005340 *
5341 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005342 * and performs basic initialization. Also allocates subqueue structs
5343 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005344 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005345struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5346 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005347{
David S. Millere8a04642008-07-17 00:34:19 -07005348 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005349 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005350 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005351 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005352
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005353 BUG_ON(strlen(name) >= sizeof(dev->name));
5354
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005355 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005356 if (sizeof_priv) {
5357 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005358 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005359 alloc_size += sizeof_priv;
5360 }
5361 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005362 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005363
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005364 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005365 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005366 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005367 return NULL;
5368 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005369
Stephen Hemminger79439862008-07-21 13:28:44 -07005370 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005371 if (!tx) {
5372 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5373 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005374 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005375 }
5376
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005377 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005378 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005379
5380 if (dev_addr_init(dev))
5381 goto free_tx;
5382
Jiri Pirkoccffad252009-05-22 23:22:17 +00005383 dev_unicast_init(dev);
5384
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005385 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005386
David S. Millere8a04642008-07-17 00:34:19 -07005387 dev->_tx = tx;
5388 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005389 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005390
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005391 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005392
David S. Millerbb949fb2008-07-08 16:55:56 -07005393 netdev_init_queues(dev);
5394
Herbert Xud565b0a2008-12-15 23:38:52 -08005395 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005396 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00005397 INIT_LIST_HEAD(&dev->link_watch_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005398 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005399 setup(dev);
5400 strcpy(dev->name, name);
5401 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005402
5403free_tx:
5404 kfree(tx);
5405
5406free_p:
5407 kfree(p);
5408 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005409}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005410EXPORT_SYMBOL(alloc_netdev_mq);
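
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * multiqueue driver allocates all transmit queues up front and may later
 * lower real_num_tx_queues to what the hardware actually enables.  The
 * private struct, name format and queue count of 8 are assumptions.
 *
 *	dev = alloc_netdev_mq(sizeof(struct example_priv), "exm%d",
 *			      ether_setup, 8);
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->real_num_tx_queues = 4;
 */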
Linus Torvalds1da177e2005-04-16 15:20:36 -07005411
5412/**
5413 * free_netdev - free network device
5414 * @dev: device
5415 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005416 * This function does the last stage of destroying an allocated device
5417 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005418 * If this is the last reference then it will be freed.
5419 */
5420void free_netdev(struct net_device *dev)
5421{
Herbert Xud565b0a2008-12-15 23:38:52 -08005422 struct napi_struct *p, *n;
5423
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005424 release_net(dev_net(dev));
5425
David S. Millere8a04642008-07-17 00:34:19 -07005426 kfree(dev->_tx);
5427
Jiri Pirkof001fde2009-05-05 02:48:28 +00005428 /* Flush device addresses */
5429 dev_addr_flush(dev);
5430
Herbert Xud565b0a2008-12-15 23:38:52 -08005431 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5432 netif_napi_del(p);
5433
Stephen Hemminger3041a062006-05-26 13:25:24 -07005434 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435 if (dev->reg_state == NETREG_UNINITIALIZED) {
5436 kfree((char *)dev - dev->padded);
5437 return;
5438 }
5439
5440 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5441 dev->reg_state = NETREG_RELEASED;
5442
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005443 /* will free via device release */
5444 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005446EXPORT_SYMBOL(free_netdev);
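/*
 * Editor's illustrative sketch: the usual pairing in a driver probe
 * path.  free_netdev() handles both the not-yet-registered case
 * (NETREG_UNINITIALIZED, freed immediately) and the post-unregister
 * case (freed when the last reference is dropped).  my_register is a
 * hypothetical name.
 */
static int my_register(struct net_device *dev)
{
	int err = register_netdev(dev);

	if (err) {
		free_netdev(dev);	/* still NETREG_UNINITIALIZED here */
		return err;
	}
	return 0;
}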
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005447
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005448/**
5449 * synchronize_net - Synchronize with packet receive processing
5450 *
5451 * Wait for packets currently being received to be done.
5452 * Does not block later packets from starting.
5453 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005454void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005455{
5456 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005457 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005458}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005459EXPORT_SYMBOL(synchronize_net);
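/*
 * Editor's illustrative sketch: a protocol module unhooks its packet
 * handler and then waits out receives already in progress before
 * freeing the state that handler used (dev_remove_pack() performs the
 * same two steps internally).  my_proto_unhook and my_pt are
 * hypothetical names.
 */
static void my_proto_unhook(struct packet_type *my_pt)
{
	__dev_remove_pack(my_pt);	/* new packets no longer match */
	synchronize_net();		/* wait for receives in flight */
	/* handler-private state may now be freed safely */
}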
Linus Torvalds1da177e2005-04-16 15:20:36 -07005460
5461/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005462 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005463 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005464 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08005465 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005466 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005467 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005468 *	If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005469 *
5470 * Callers must hold the rtnl semaphore. You may want
5471 * unregister_netdev() instead of this.
5472 */
5473
Eric Dumazet44a08732009-10-27 07:03:04 +00005474void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005475{
Herbert Xua6620712007-12-12 19:21:56 -08005476 ASSERT_RTNL();
5477
Eric Dumazet44a08732009-10-27 07:03:04 +00005478 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005479 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005480 } else {
5481 rollback_registered(dev);
5482 /* Finish processing unregister after unlock */
5483 net_set_todo(dev);
5484 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005485}
Eric Dumazet44a08732009-10-27 07:03:04 +00005486EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005487
5488/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005489 * unregister_netdevice_many - unregister many devices
5490 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005491 */
5492void unregister_netdevice_many(struct list_head *head)
5493{
5494 struct net_device *dev;
5495
5496 if (!list_empty(head)) {
5497 rollback_registered_many(head);
5498 list_for_each_entry(dev, head, unreg_list)
5499 net_set_todo(dev);
5500 }
5501}
Eric Dumazet63c80992009-10-27 07:06:49 +00005502EXPORT_SYMBOL(unregister_netdevice_many);
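/*
 * Editor's illustrative sketch: queueing several devices on a local
 * list and committing them in one batch, so the costly RCU barriers in
 * rollback_registered_many() are paid once rather than once per
 * device (default_device_exit_batch() below uses the same pattern).
 * my_unregister_all is a hypothetical helper.
 */
static void my_unregister_all(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	ASSERT_RTNL();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
}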
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005503
5504/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005505 * unregister_netdev - remove device from the kernel
5506 * @dev: device
5507 *
5508 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005509 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005510 *
5511 * This is just a wrapper for unregister_netdevice that takes
5512 * the rtnl semaphore. In general you want to use this and not
5513 * unregister_netdevice.
5514 */
5515void unregister_netdev(struct net_device *dev)
5516{
5517 rtnl_lock();
5518 unregister_netdevice(dev);
5519 rtnl_unlock();
5520}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005521EXPORT_SYMBOL(unregister_netdev);
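/*
 * Editor's illustrative sketch: the common module-exit pairing.
 * my_dev is a hypothetical pointer saved at module init time.
 */
static struct net_device *my_dev;

static void __exit my_exit(void)
{
	unregister_netdev(my_dev);	/* takes and releases RTNL itself */
	free_netdev(my_dev);		/* drop the final reference */
}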
5522
Eric W. Biedermance286d32007-09-12 13:53:49 +02005523/**
5524 *	dev_change_net_namespace - move device to different network namespace
5525 * @dev: device
5526 * @net: network namespace
5527 *	@pat: If not NULL, a name pattern to try if the current device name
5528 * is already taken in the destination network namespace.
5529 *
5530 * This function shuts down a device interface and moves it
5531 * to a new network namespace. On success 0 is returned, on
5532 *	failure a negative errno code is returned.
5533 *
5534 * Callers must hold the rtnl semaphore.
5535 */
5536
5537int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5538{
Eric W. Biedermance286d32007-09-12 13:53:49 +02005539 int err;
5540
5541 ASSERT_RTNL();
5542
5543 /* Don't allow namespace local devices to be moved. */
5544 err = -EINVAL;
5545 if (dev->features & NETIF_F_NETNS_LOCAL)
5546 goto out;
5547
Eric W. Biederman38918452008-10-27 17:51:47 -07005548#ifdef CONFIG_SYSFS
5549 /* Don't allow real devices to be moved when sysfs
5550 * is enabled.
5551 */
5552 err = -EINVAL;
5553 if (dev->dev.parent)
5554 goto out;
5555#endif
5556
Eric W. Biedermance286d32007-09-12 13:53:49 +02005557	/* Ensure the device has been registered */
5558 err = -EINVAL;
5559 if (dev->reg_state != NETREG_REGISTERED)
5560 goto out;
5561
5562	/* Get out if there is nothing to do */
5563 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005564 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005565 goto out;
5566
5567 /* Pick the destination device name, and ensure
5568 * we can use it in the destination network namespace.
5569 */
5570 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00005571 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005572 /* We get here if we can't use the current device name */
5573 if (!pat)
5574 goto out;
Octavian Purdilad9031022009-11-18 02:36:59 +00005575 if (dev_get_valid_name(net, pat, dev->name, 1))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005576 goto out;
5577 }
5578
5579 /*
5580	 * And now a mini version of register_netdevice and unregister_netdevice.
5581 */
5582
5583 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005584 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005585
5586	/* And unlink it from the device chain */
5587 err = -ENODEV;
5588 unlist_netdevice(dev);
5589
5590 synchronize_net();
5591
5592 /* Shutdown queueing discipline. */
5593 dev_shutdown(dev);
5594
5595	/* Notify protocols that we are about to destroy
5596	   this device. They should clean up all their state.
5597 */
5598 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005599 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005600
5601 /*
5602 * Flush the unicast and multicast chains
5603 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00005604 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005605 dev_addr_discard(dev);
5606
Eric W. Biederman38918452008-10-27 17:51:47 -07005607 netdev_unregister_kobject(dev);
5608
Eric W. Biedermance286d32007-09-12 13:53:49 +02005609 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005610 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005611
Eric W. Biedermance286d32007-09-12 13:53:49 +02005612 /* If there is an ifindex conflict assign a new one */
5613 if (__dev_get_by_index(net, dev->ifindex)) {
5614 int iflink = (dev->iflink == dev->ifindex);
5615 dev->ifindex = dev_new_index(net);
5616 if (iflink)
5617 dev->iflink = dev->ifindex;
5618 }
5619
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005620 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005621 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005622 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005623
5624 /* Add the device back in the hashes */
5625 list_netdevice(dev);
5626
5627 /* Notify protocols, that a new device appeared. */
5628 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5629
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005630 /*
5631 * Prevent userspace races by waiting until the network
5632 * device is fully setup before sending notifications.
5633 */
5634 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5635
Eric W. Biedermance286d32007-09-12 13:53:49 +02005636 synchronize_net();
5637 err = 0;
5638out:
5639 return err;
5640}
Johannes Berg463d0182009-07-14 00:33:35 +02005641EXPORT_SYMBOL_GPL(dev_change_net_namespace);
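/*
 * Editor's illustrative sketch: moving a device into another namespace
 * under RTNL, with "dev%d" as the fallback pattern should the current
 * name already be taken there.  my_move is a hypothetical helper.
 */
static int my_move(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "dev%d");
	rtnl_unlock();
	return err;
}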
Eric W. Biedermance286d32007-09-12 13:53:49 +02005642
Linus Torvalds1da177e2005-04-16 15:20:36 -07005643static int dev_cpu_callback(struct notifier_block *nfb,
5644 unsigned long action,
5645 void *ocpu)
5646{
5647 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005648 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005649 struct sk_buff *skb;
5650 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5651 struct softnet_data *sd, *oldsd;
5652
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005653 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005654 return NOTIFY_OK;
5655
5656 local_irq_disable();
5657 cpu = smp_processor_id();
5658 sd = &per_cpu(softnet_data, cpu);
5659 oldsd = &per_cpu(softnet_data, oldcpu);
5660
5661 /* Find end of our completion_queue. */
5662 list_skb = &sd->completion_queue;
5663 while (*list_skb)
5664 list_skb = &(*list_skb)->next;
5665 /* Append completion queue from offline CPU. */
5666 *list_skb = oldsd->completion_queue;
5667 oldsd->completion_queue = NULL;
5668
5669 /* Find end of our output_queue. */
5670 list_net = &sd->output_queue;
5671 while (*list_net)
5672 list_net = &(*list_net)->next_sched;
5673 /* Append output queue from offline CPU. */
5674 *list_net = oldsd->output_queue;
5675 oldsd->output_queue = NULL;
5676
5677 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5678 local_irq_enable();
5679
5680 /* Process offline CPU's input_pkt_queue */
5681 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5682 netif_rx(skb);
5683
5684 return NOTIFY_OK;
5685}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005686
5687
Herbert Xu7f353bf2007-08-10 15:47:58 -07005688/**
Herbert Xub63365a2008-10-23 01:11:29 -07005689 * netdev_increment_features - increment feature set by one
5690 * @all: current feature set
5691 * @one: new feature set
5692 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005693 *
5694 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005695 * @one to the master device with current feature set @all. Will not
5696 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005697 */
Herbert Xub63365a2008-10-23 01:11:29 -07005698unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5699 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005700{
Herbert Xub63365a2008-10-23 01:11:29 -07005701 /* If device needs checksumming, downgrade to it. */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005702 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
Herbert Xub63365a2008-10-23 01:11:29 -07005703 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5704 else if (mask & NETIF_F_ALL_CSUM) {
5705 /* If one device supports v4/v6 checksumming, set for all. */
5706 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5707 !(all & NETIF_F_GEN_CSUM)) {
5708 all &= ~NETIF_F_ALL_CSUM;
5709 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5710 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005711
Herbert Xub63365a2008-10-23 01:11:29 -07005712 /* If one device supports hw checksumming, set for all. */
5713 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5714 all &= ~NETIF_F_ALL_CSUM;
5715 all |= NETIF_F_HW_CSUM;
5716 }
5717 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005718
Herbert Xub63365a2008-10-23 01:11:29 -07005719 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005720
Herbert Xub63365a2008-10-23 01:11:29 -07005721 one |= all & NETIF_F_ONE_FOR_ALL;
Sridhar Samudralad9f59502009-10-07 12:24:25 +00005722 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
Herbert Xub63365a2008-10-23 01:11:29 -07005723 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005724
5725 return all;
5726}
Herbert Xub63365a2008-10-23 01:11:29 -07005727EXPORT_SYMBOL(netdev_increment_features);
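/*
 * Editor's illustrative sketch: how a master device (bonding-style)
 * might fold each slave's feature set into its own, starting from an
 * optimistic mask.  my_master_features, the starting set and the
 * NETIF_F_ONE_FOR_ALL mask choice are assumptions for illustration,
 * not the bonding driver's exact code.
 */
static unsigned long my_master_features(struct net_device *slaves[], int n)
{
	unsigned long features = NETIF_F_ALL_CSUM | NETIF_F_SG;
	int i;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features,
						     NETIF_F_ONE_FOR_ALL);
	return features;
}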
Herbert Xu7f353bf2007-08-10 15:47:58 -07005728
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005729static struct hlist_head *netdev_create_hash(void)
5730{
5731 int i;
5732 struct hlist_head *hash;
5733
5734 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5735 if (hash != NULL)
5736 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5737 INIT_HLIST_HEAD(&hash[i]);
5738
5739 return hash;
5740}
5741
Eric W. Biederman881d9662007-09-17 11:56:21 -07005742/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005743static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005744{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005745 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005746
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005747 net->dev_name_head = netdev_create_hash();
5748 if (net->dev_name_head == NULL)
5749 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005750
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005751 net->dev_index_head = netdev_create_hash();
5752 if (net->dev_index_head == NULL)
5753 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005754
5755 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005756
5757err_idx:
5758 kfree(net->dev_name_head);
5759err_name:
5760 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005761}
5762
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005763/**
5764 * netdev_drivername - network driver for the device
5765 * @dev: network device
5766 * @buffer: buffer for resulting name
5767 * @len: size of buffer
5768 *
5769 * Determine network driver for device.
5770 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005771char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005772{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005773 const struct device_driver *driver;
5774 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005775
5776 if (len <= 0 || !buffer)
5777 return buffer;
5778 buffer[0] = 0;
5779
5780 parent = dev->dev.parent;
5781
5782 if (!parent)
5783 return buffer;
5784
5785 driver = parent->driver;
5786 if (driver && driver->name)
5787 strlcpy(buffer, driver->name, len);
5788 return buffer;
5789}
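/*
 * Editor's illustrative sketch: the typical diagnostic caller (the TX
 * watchdog in sch_generic.c does much the same), naming the driver of
 * a wedged device.  my_report_stall is a hypothetical name.
 */
static void my_report_stall(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_WARNING "%s (%s): transmit queue timed out\n",
	       dev->name, netdev_drivername(dev, drivername, 64));
}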
5790
Pavel Emelyanov46650792007-10-08 20:38:39 -07005791static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005792{
5793 kfree(net->dev_name_head);
5794 kfree(net->dev_index_head);
5795}
5796
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005797static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005798 .init = netdev_init,
5799 .exit = netdev_exit,
5800};
5801
Pavel Emelyanov46650792007-10-08 20:38:39 -07005802static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005803{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005804 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005805 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005806 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02005807 * initial network namespace
5808 */
5809 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005810 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005811 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005812 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005813
5814		/* Ignore unmovable devices (i.e. loopback) */
5815 if (dev->features & NETIF_F_NETNS_LOCAL)
5816 continue;
5817
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005818 /* Leave virtual devices for the generic cleanup */
5819 if (dev->rtnl_link_ops)
5820 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005821
Eric W. Biedermance286d32007-09-12 13:53:49 +02005822		/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005823 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5824 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005825 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005826 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005827 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005828 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005829 }
5830 }
5831 rtnl_unlock();
5832}
5833
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00005834static void __net_exit default_device_exit_batch(struct list_head *net_list)
5835{
5836	/* At exit all network devices must be removed from a network
5837	 * namespace.  Do this in the reverse order of registration.
5838 * Do this across as many network namespaces as possible to
5839 * improve batching efficiency.
5840 */
5841 struct net_device *dev;
5842 struct net *net;
5843 LIST_HEAD(dev_kill_list);
5844
5845 rtnl_lock();
5846 list_for_each_entry(net, net_list, exit_list) {
5847 for_each_netdev_reverse(net, dev) {
5848 if (dev->rtnl_link_ops)
5849 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
5850 else
5851 unregister_netdevice_queue(dev, &dev_kill_list);
5852 }
5853 }
5854 unregister_netdevice_many(&dev_kill_list);
5855 rtnl_unlock();
5856}
5857
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005858static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005859 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00005860 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02005861};
5862
Linus Torvalds1da177e2005-04-16 15:20:36 -07005863/*
5864 * Initialize the DEV module. At boot time this walks the device list and
5865 * unhooks any devices that fail to initialise (normally hardware not
5866 * present) and leaves us with a valid list of present and active devices.
5867 *
5868 */
5869
5870/*
5871 * This is called single threaded during boot, so no need
5872 * to take the rtnl semaphore.
5873 */
5874static int __init net_dev_init(void)
5875{
5876 int i, rc = -ENOMEM;
5877
5878 BUG_ON(!dev_boot_phase);
5879
Linus Torvalds1da177e2005-04-16 15:20:36 -07005880 if (dev_proc_init())
5881 goto out;
5882
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005883 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005884 goto out;
5885
5886 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08005887 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005888 INIT_LIST_HEAD(&ptype_base[i]);
5889
Eric W. Biederman881d9662007-09-17 11:56:21 -07005890 if (register_pernet_subsys(&netdev_net_ops))
5891 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005892
5893 /*
5894 * Initialise the packet receive queues.
5895 */
5896
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005897 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005898 struct softnet_data *queue;
5899
5900 queue = &per_cpu(softnet_data, i);
5901 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005902 queue->completion_queue = NULL;
5903 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005904
5905 queue->backlog.poll = process_backlog;
5906 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005907 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005908 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005909 }
5910
Linus Torvalds1da177e2005-04-16 15:20:36 -07005911 dev_boot_phase = 0;
5912
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005913	/* The loopback device is special: if any other network device
5914	 * is present in a network namespace, the loopback device must
5915	 * be present too. Since we now dynamically allocate and free
5916	 * the loopback device, ensure this invariant is maintained by
5917	 * keeping the loopback device as the first device on the
5918	 * list of network devices. This ensures that the loopback
5919	 * device is the first device that appears and the last
5920	 * network device that disappears.
5921 */
5922 if (register_pernet_device(&loopback_net_ops))
5923 goto out;
5924
5925 if (register_pernet_device(&default_device_ops))
5926 goto out;
5927
Carlos R. Mafra962cf362008-05-15 11:15:37 -03005928 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5929 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005930
5931 hotcpu_notifier(dev_cpu_callback, 0);
5932 dst_init();
5933 dev_mcast_init();
5934 rc = 0;
5935out:
5936 return rc;
5937}
5938
5939subsys_initcall(net_dev_init);
5940
Krishna Kumare88721f2009-02-18 17:55:02 -08005941static int __init initialize_hashrnd(void)
5942{
5943 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5944 return 0;
5945}
5946
5947late_initcall_sync(initialize_hashrnd);
5948