/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, and therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see the
 *	new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
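
/*
 * Example usage (illustrative sketch only; example_rcv and example_tap are
 * made-up names, not part of dev.c): a module that registers a tap for
 * every protocol and removes it again on exit.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		(taps receive clones; consume them)
 *		return 0;
 *	}
 *
 *	static struct packet_type example_tap = {
 *		.type	= htons(ETH_P_ALL),
 *		.func	= example_rcv,
 *	};
 *
 *	dev_add_pack(&example_tap);	(module init)
 *	dev_remove_pack(&example_tap);	(module exit; may sleep)
 */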

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
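
/*
 * For example (values purely illustrative), booting with a command line
 * option such as
 *
 *	netdev=5,0x340,eth0
 *
 * is parsed by the routine above as irq 5 and base_addr 0x340, to be
 * applied to the device that later probes under the name eth0.
 */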

/*******************************************************************************

				Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
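
/*
 * Example usage (illustrative sketch only; "eth0" is just a placeholder
 * name): a lookup from process context takes a reference that must be
 * dropped with dev_put() once the caller is done with the device.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */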

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
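
/*
 * Example usage (illustrative sketch only): an RCU lookup by ifindex.  The
 * returned pointer is only guaranteed valid inside the read-side critical
 * section unless the caller takes its own reference with dev_hold().
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		... read dev fields, or dev_hold(dev) to keep it ...
 *	rcu_read_unlock();
 */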


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
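
/*
 * Example usage (illustrative sketch only; "foo%d" is a made-up format): a
 * driver that calls register_netdevice() directly can resolve a wildcard
 * name first, e.g.
 *
 *	err = dev_alloc_name(dev, "foo%d");
 *	if (err < 0)
 *		goto out;
 *	err = register_netdevice(dev);
 *
 * register_netdev() performs an equivalent step internally when the name
 * contains a '%' format character.
 */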

static int dev_get_valid_name(struct net *net, const char *name, char *buf,
			      bool fmt)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return __dev_alloc_name(net, name, buf);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (buf != name)
		strlcpy(buf, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, newname, dev->name, 1);
	if (err < 0)
		return err;

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net_eq(net, &init_net)) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
EXPORT_SYMBOL(dev_open);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare to death, when device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
EXPORT_SYMBOL(dev_close);
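
/*
 * Example usage (illustrative sketch only): both dev_open() and dev_close()
 * assert that the RTNL semaphore is held, so an in-kernel caller brings an
 * interface up or down roughly like this:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	...
 *	dev_close(dev);
 *	rtnl_unlock();
 */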


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


Eric W. Biederman881d9662007-09-17 11:56:21 -07001292static int dev_boot_phase = 1;
1293
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294/*
1295 * Device change register/unregister. These are not inline or static
1296 * as we export them to the world.
1297 */
1298
1299/**
1300 * register_netdevice_notifier - register a network notifier block
1301 * @nb: notifier
1302 *
1303 * Register a notifier to be called when network device events occur.
1304 * The notifier passed is linked into the kernel structures and must
1305 * not be reused until it has been unregistered. A negative errno code
1306 * is returned on a failure.
1307 *
1308 * When registered all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001309 * to the new notifier to allow device to have a race free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 * view of the network device list.
1311 */
1312
1313int register_netdevice_notifier(struct notifier_block *nb)
1314{
1315 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001316 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001317 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 int err;
1319
1320 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001321 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001322 if (err)
1323 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001324 if (dev_boot_phase)
1325 goto unlock;
1326 for_each_net(net) {
1327 for_each_netdev(net, dev) {
1328 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1329 err = notifier_to_errno(err);
1330 if (err)
1331 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332
Eric W. Biederman881d9662007-09-17 11:56:21 -07001333 if (!(dev->flags & IFF_UP))
1334 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001335
Eric W. Biederman881d9662007-09-17 11:56:21 -07001336 nb->notifier_call(nb, NETDEV_UP, dev);
1337 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001339
1340unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 rtnl_unlock();
1342 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001343
1344rollback:
1345 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001346 for_each_net(net) {
1347 for_each_netdev(net, dev) {
1348 if (dev == last)
1349 break;
Herbert Xufcc5a032007-07-30 17:03:38 -07001350
Eric W. Biederman881d9662007-09-17 11:56:21 -07001351 if (dev->flags & IFF_UP) {
1352 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1353 nb->notifier_call(nb, NETDEV_DOWN, dev);
1354 }
1355 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00001356 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001357 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001358 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001359
1360 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001361 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001363EXPORT_SYMBOL(register_netdevice_notifier);
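
/*
 * Illustrative sketch: a minimal notifier block a module might register.
 * The function and variable names are hypothetical; in this kernel the
 * notifier's data pointer is the struct net_device itself.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_netdev_notifier = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_netdev_notifier);
 *	...
 *	unregister_netdevice_notifier(&example_netdev_notifier);
 */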
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364
1365/**
1366 * unregister_netdevice_notifier - unregister a network notifier block
1367 * @nb: notifier
1368 *
1369 * Unregister a notifier previously registered by
1370 * register_netdevice_notifier(). The notifier is unlinked from the
1371 * kernel structures and may then be reused. A negative errno code
1372 * is returned on a failure.
1373 */
1374
1375int unregister_netdevice_notifier(struct notifier_block *nb)
1376{
Herbert Xu9f514952006-03-25 01:24:25 -08001377 int err;
1378
1379 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001380 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001381 rtnl_unlock();
1382 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001384EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385
1386/**
1387 * call_netdevice_notifiers - call all network notifier blocks
1388 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001389 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 *
1391 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001392 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 */
1394
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001395int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001397 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398}
1399
1400/* When > 0 there are consumers of rx skb time stamps */
1401static atomic_t netstamp_needed = ATOMIC_INIT(0);
1402
1403void net_enable_timestamp(void)
1404{
1405 atomic_inc(&netstamp_needed);
1406}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001407EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
1409void net_disable_timestamp(void)
1410{
1411 atomic_dec(&netstamp_needed);
1412}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001413EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001415static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416{
1417 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001418 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001419 else
1420 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421}
1422
Arnd Bergmann44540962009-11-26 06:07:08 +00001423/**
1424 * dev_forward_skb - loopback an skb to another netif
1425 *
1426 * @dev: destination network device
1427 * @skb: buffer to forward
1428 *
1429 * return values:
1430 * NET_RX_SUCCESS (no congestion)
1431 * NET_RX_DROP (packet was dropped)
1432 *
1433 * dev_forward_skb can be used for injecting an skb from the
1434 * start_xmit function of one device into the receive queue
1435 * of another device.
1436 *
1437 * The receiving device may be in another namespace, so
1438 * we have to clear all information in the skb that could
1439 * impact namespace isolation.
1440 */
1441int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1442{
1443 skb_orphan(skb);
1444
1445 if (!(dev->flags & IFF_UP))
1446 return NET_RX_DROP;
1447
1448 if (skb->len > (dev->mtu + dev->hard_header_len))
1449 return NET_RX_DROP;
1450
1451 skb_dst_drop(skb);
1452 skb->tstamp.tv64 = 0;
1453 skb->pkt_type = PACKET_HOST;
1454 skb->protocol = eth_type_trans(skb, dev);
1455 skb->mark = 0;
1456 secpath_reset(skb);
1457 nf_reset(skb);
1458 return netif_rx(skb);
1459}
1460EXPORT_SYMBOL_GPL(dev_forward_skb);
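
/*
 * Illustrative sketch: a pair device in the style of veth injecting frames
 * into its peer from ndo_start_xmit().  The peer lookup helper and the
 * example_* names are hypothetical.
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = example_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
 *			dev->stats.tx_packets++;
 *		else
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */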
1461
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462/*
1463 * Support routine. Sends outgoing frames to any network
1464 * taps currently in use.
1465 */
1466
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001467static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468{
1469 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001470
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001471#ifdef CONFIG_NET_CLS_ACT
1472 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1473 net_timestamp(skb);
1474#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001475 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001476#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477
1478 rcu_read_lock();
1479 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1480 /* Never send packets back to the socket
1481 * they originated from - MvS (miquels@drinkel.ow.org)
1482 */
1483 if ((ptype->dev == dev || !ptype->dev) &&
1484 (ptype->af_packet_priv == NULL ||
1485 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001486 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 if (!skb2)
1488 break;
1489
1490 /* skb->nh should be correctly
1491			   set by the sender, so that the second statement is
1492 just protection against buggy protocols.
1493 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001494 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001496 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001497 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 if (net_ratelimit())
1499 printk(KERN_CRIT "protocol %04x is "
1500 "buggy, dev %s\n",
1501 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001502 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 }
1504
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001505 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001507 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 }
1509 }
1510 rcu_read_unlock();
1511}
1512
Denis Vlasenko56079432006-03-29 15:57:29 -08001513
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001514static inline void __netif_reschedule(struct Qdisc *q)
1515{
1516 struct softnet_data *sd;
1517 unsigned long flags;
1518
1519 local_irq_save(flags);
1520 sd = &__get_cpu_var(softnet_data);
1521 q->next_sched = sd->output_queue;
1522 sd->output_queue = q;
1523 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1524 local_irq_restore(flags);
1525}
1526
David S. Miller37437bb2008-07-16 02:15:04 -07001527void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001528{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001529 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1530 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001531}
1532EXPORT_SYMBOL(__netif_schedule);
1533
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001534void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001535{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001536 if (atomic_dec_and_test(&skb->users)) {
1537 struct softnet_data *sd;
1538 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001539
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001540 local_irq_save(flags);
1541 sd = &__get_cpu_var(softnet_data);
1542 skb->next = sd->completion_queue;
1543 sd->completion_queue = skb;
1544 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1545 local_irq_restore(flags);
1546 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001547}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001548EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001549
1550void dev_kfree_skb_any(struct sk_buff *skb)
1551{
1552 if (in_irq() || irqs_disabled())
1553 dev_kfree_skb_irq(skb);
1554 else
1555 dev_kfree_skb(skb);
1556}
1557EXPORT_SYMBOL(dev_kfree_skb_any);
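
/*
 * Illustrative sketch: a TX-completion handler that may run in either
 * hard-irq or process context, so it frees buffers with
 * dev_kfree_skb_any().  The function name is hypothetical.
 *
 *	static void example_tx_complete(struct net_device *dev,
 *					struct sk_buff *skb)
 *	{
 *		dev->stats.tx_packets++;
 *		dev->stats.tx_bytes += skb->len;
 *		dev_kfree_skb_any(skb);
 *	}
 */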
1558
1559
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001560/**
1561 * netif_device_detach - mark device as removed
1562 * @dev: network device
1563 *
1564 * Mark device as removed from the system and therefore no longer available.
1565 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001566void netif_device_detach(struct net_device *dev)
1567{
1568 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1569 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001570 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001571 }
1572}
1573EXPORT_SYMBOL(netif_device_detach);
1574
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001575/**
1576 * netif_device_attach - mark device as attached
1577 * @dev: network device
1578 *
1579 * Mark device as attached to the system and restart it if needed.
1580 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001581void netif_device_attach(struct net_device *dev)
1582{
1583 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1584 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001585 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001586 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001587 }
1588}
1589EXPORT_SYMBOL(netif_device_attach);
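
/*
 * Illustrative sketch: the usual pairing of netif_device_detach() and
 * netif_device_attach() in a PCI driver's suspend and resume hooks.  The
 * hook names are hypothetical and device-specific power handling is elided.
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */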
1590
Ben Hutchings6de329e2008-06-16 17:02:28 -07001591static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1592{
1593 return ((features & NETIF_F_GEN_CSUM) ||
1594 ((features & NETIF_F_IP_CSUM) &&
1595 protocol == htons(ETH_P_IP)) ||
1596 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001597 protocol == htons(ETH_P_IPV6)) ||
1598 ((features & NETIF_F_FCOE_CRC) &&
1599 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001600}
1601
1602static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1603{
1604 if (can_checksum_protocol(dev->features, skb->protocol))
1605 return true;
1606
1607 if (skb->protocol == htons(ETH_P_8021Q)) {
1608 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1609 if (can_checksum_protocol(dev->features & dev->vlan_features,
1610 veh->h_vlan_encapsulated_proto))
1611 return true;
1612 }
1613
1614 return false;
1615}
Denis Vlasenko56079432006-03-29 15:57:29 -08001616
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617/*
1618 * Invalidate hardware checksum when packet is to be mangled, and
1619 * complete checksum manually on outgoing path.
1620 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001621int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622{
Al Virod3bc23e2006-11-14 21:24:49 -08001623 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001624 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
Patrick McHardy84fa7932006-08-29 16:44:56 -07001626 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001627 goto out_set_summed;
1628
1629 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001630 /* Let GSO fix up the checksum. */
1631 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 }
1633
Herbert Xua0308472007-10-15 01:47:15 -07001634 offset = skb->csum_start - skb_headroom(skb);
1635 BUG_ON(offset >= skb_headlen(skb));
1636 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1637
1638 offset += skb->csum_offset;
1639 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1640
1641 if (skb_cloned(skb) &&
1642 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1644 if (ret)
1645 goto out;
1646 }
1647
Herbert Xua0308472007-10-15 01:47:15 -07001648 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001649out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001651out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 return ret;
1653}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001654EXPORT_SYMBOL(skb_checksum_help);
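
/*
 * Illustrative sketch: a transmit path that cannot offload the checksum for
 * a given skb would finalize it before mangling or sending the data.  The
 * surrounding driver code is hypothetical.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) {
 *		kfree_skb(skb);
 *		return NETDEV_TX_OK;
 *	}
 */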
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001656/**
1657 * skb_gso_segment - Perform segmentation on skb.
1658 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001659 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001660 *
1661 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001662 *
1663 * It may return NULL if the skb requires no segmentation. This is
1664 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001665 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001666struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001667{
1668 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1669 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001670 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001671 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001672
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001673 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001674 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001675 __skb_pull(skb, skb->mac_len);
1676
Herbert Xu67fd1a72009-01-19 16:26:44 -08001677 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1678 struct net_device *dev = skb->dev;
1679 struct ethtool_drvinfo info = {};
1680
1681 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1682 dev->ethtool_ops->get_drvinfo(dev, &info);
1683
1684 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1685 "ip_summed=%d",
1686 info.driver, dev ? dev->features : 0L,
1687 skb->sk ? skb->sk->sk_route_caps : 0L,
1688 skb->len, skb->data_len, skb->ip_summed);
1689
Herbert Xua430a432006-07-08 13:34:56 -07001690 if (skb_header_cloned(skb) &&
1691 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1692 return ERR_PTR(err);
1693 }
1694
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001695 rcu_read_lock();
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08001696 list_for_each_entry_rcu(ptype,
1697 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001698 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001699 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001700 err = ptype->gso_send_check(skb);
1701 segs = ERR_PTR(err);
1702 if (err || skb_gso_ok(skb, features))
1703 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001704 __skb_push(skb, (skb->data -
1705 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001706 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001707 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001708 break;
1709 }
1710 }
1711 rcu_read_unlock();
1712
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001713 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001714
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001715 return segs;
1716}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001717EXPORT_SYMBOL(skb_gso_segment);
1718
Herbert Xufb286bb2005-11-10 13:01:24 -08001719/* Take action when hardware reception checksum errors are detected. */
1720#ifdef CONFIG_BUG
1721void netdev_rx_csum_fault(struct net_device *dev)
1722{
1723 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001724 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001725 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001726 dump_stack();
1727 }
1728}
1729EXPORT_SYMBOL(netdev_rx_csum_fault);
1730#endif
1731
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732/* Actually, we should eliminate this check as soon as we know that:
1733 * 1. An IOMMU is present and can map all of the memory.
1734 * 2. No high memory really exists on this machine.
1735 */
1736
1737static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1738{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001739#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 int i;
1741
1742 if (dev->features & NETIF_F_HIGHDMA)
1743 return 0;
1744
1745 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1746 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1747 return 1;
1748
Herbert Xu3d3a8532006-06-27 13:33:10 -07001749#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 return 0;
1751}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001753struct dev_gso_cb {
1754 void (*destructor)(struct sk_buff *skb);
1755};
1756
1757#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1758
1759static void dev_gso_skb_destructor(struct sk_buff *skb)
1760{
1761 struct dev_gso_cb *cb;
1762
1763 do {
1764 struct sk_buff *nskb = skb->next;
1765
1766 skb->next = nskb->next;
1767 nskb->next = NULL;
1768 kfree_skb(nskb);
1769 } while (skb->next);
1770
1771 cb = DEV_GSO_CB(skb);
1772 if (cb->destructor)
1773 cb->destructor(skb);
1774}
1775
1776/**
1777 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1778 * @skb: buffer to segment
1779 *
1780 * This function segments the given skb and stores the list of segments
1781 * in skb->next.
1782 */
1783static int dev_gso_segment(struct sk_buff *skb)
1784{
1785 struct net_device *dev = skb->dev;
1786 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001787 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1788 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001789
Herbert Xu576a30e2006-06-27 13:22:38 -07001790 segs = skb_gso_segment(skb, features);
1791
1792 /* Verifying header integrity only. */
1793 if (!segs)
1794 return 0;
1795
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001796 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001797 return PTR_ERR(segs);
1798
1799 skb->next = segs;
1800 DEV_GSO_CB(skb)->destructor = skb->destructor;
1801 skb->destructor = dev_gso_skb_destructor;
1802
1803 return 0;
1804}
1805
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001806int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1807 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001808{
Stephen Hemminger00829822008-11-20 20:14:53 -08001809 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00001810 int rc = NETDEV_TX_OK;
Stephen Hemminger00829822008-11-20 20:14:53 -08001811
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001812 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001813 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001814 dev_queue_xmit_nit(skb, dev);
1815
Herbert Xu576a30e2006-06-27 13:22:38 -07001816 if (netif_needs_gso(dev, skb)) {
1817 if (unlikely(dev_gso_segment(skb)))
1818 goto out_kfree_skb;
1819 if (skb->next)
1820 goto gso;
1821 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001822
Eric Dumazet93f154b2009-05-18 22:19:19 -07001823 /*
1824	 * If the device doesn't need skb->dst, release it right now while
1825	 * it's hot in this CPU's cache.
1826 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001827 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1828 skb_dst_drop(skb);
1829
Patrick Ohlyac45f602009-02-12 05:03:37 +00001830 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001831 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001832 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001833 /*
1834 * TODO: if skb_orphan() was called by
1835 * dev->hard_start_xmit() (for example, the unmodified
1836 * igb driver does that; bnx2 doesn't), then
1837 * skb_tx_software_timestamp() will be unable to send
1838 * back the time stamp.
1839 *
1840 * How can this be prevented? Always create another
1841 * reference to the socket before calling
1842	 * dev->hard_start_xmit()? Prevent skb_orphan() from
1843	 * doing anything in dev->hard_start_xmit() by clearing
1844 * the skb destructor before the call and restoring it
1845 * afterwards, then doing the skb_orphan() ourselves?
1846 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001847 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001848 }
1849
Herbert Xu576a30e2006-06-27 13:22:38 -07001850gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001851 do {
1852 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001853
1854 skb->next = nskb->next;
1855 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001856 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001857 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00001858 if (rc & ~NETDEV_TX_MASK)
1859 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07001860 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001861 skb->next = nskb;
1862 return rc;
1863 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001864 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001865 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001866 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001867 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001868
Patrick McHardy572a9d72009-11-10 06:14:14 +00001869out_kfree_gso_skb:
1870 if (likely(skb->next == NULL))
1871 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001872out_kfree_skb:
1873 kfree_skb(skb);
Patrick McHardy572a9d72009-11-10 06:14:14 +00001874 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001875}
1876
David S. Miller70192982009-01-27 16:34:47 -08001877static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001878
Stephen Hemminger92477442009-03-21 13:39:26 -07001879u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001880{
David S. Miller70192982009-01-27 16:34:47 -08001881 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001882
David S. Miller513de112009-05-03 14:43:10 -07001883 if (skb_rx_queue_recorded(skb)) {
1884 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001885 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001886 hash -= dev->real_num_tx_queues;
1887 return hash;
1888 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001889
1890 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001891 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001892 else
David S. Miller70192982009-01-27 16:34:47 -08001893 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001894
David S. Miller70192982009-01-27 16:34:47 -08001895 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001896
David S. Millerb6b2fed2008-07-21 09:48:06 -07001897 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001898}
Stephen Hemminger92477442009-03-21 13:39:26 -07001899EXPORT_SYMBOL(skb_tx_hash);
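
/*
 * Illustrative sketch: a multiqueue driver's ndo_select_queue()
 * implementation that simply defers to skb_tx_hash().  The function name
 * is hypothetical.
 *
 *	static u16 example_select_queue(struct net_device *dev,
 *					struct sk_buff *skb)
 *	{
 *		return skb_tx_hash(dev, skb);
 *	}
 */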
David S. Miller8f0f2222008-07-15 03:47:03 -07001900
Eric Dumazeted046422009-11-13 21:54:04 +00001901static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1902{
1903 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1904 if (net_ratelimit()) {
1905 WARN(1, "%s selects TX queue %d, but "
1906 "real number of TX queues is %d\n",
1907 dev->name, queue_index,
1908 dev->real_num_tx_queues);
1909 }
1910 return 0;
1911 }
1912 return queue_index;
1913}
1914
David S. Millere8a04642008-07-17 00:34:19 -07001915static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1916 struct sk_buff *skb)
1917{
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001918 u16 queue_index;
1919 struct sock *sk = skb->sk;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001920
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001921 if (sk_tx_queue_recorded(sk)) {
1922 queue_index = sk_tx_queue_get(sk);
1923 } else {
1924 const struct net_device_ops *ops = dev->netdev_ops;
1925
1926 if (ops->ndo_select_queue) {
1927 queue_index = ops->ndo_select_queue(dev, skb);
Eric Dumazeted046422009-11-13 21:54:04 +00001928 queue_index = dev_cap_txqueue(dev, queue_index);
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001929 } else {
1930 queue_index = 0;
1931 if (dev->real_num_tx_queues > 1)
1932 queue_index = skb_tx_hash(dev, skb);
1933
1934 if (sk && sk->sk_dst_cache)
1935 sk_tx_queue_set(sk, queue_index);
1936 }
1937 }
David S. Millereae792b2008-07-15 03:03:33 -07001938
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001939 skb_set_queue_mapping(skb, queue_index);
1940 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001941}
1942
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001943static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1944 struct net_device *dev,
1945 struct netdev_queue *txq)
1946{
1947 spinlock_t *root_lock = qdisc_lock(q);
1948 int rc;
1949
1950 spin_lock(root_lock);
1951 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1952 kfree_skb(skb);
1953 rc = NET_XMIT_DROP;
1954 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1955 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1956 /*
1957 * This is a work-conserving queue; there are no old skbs
1958 * waiting to be sent out; and the qdisc is not running -
1959 * xmit the skb directly.
1960 */
1961 __qdisc_update_bstats(q, skb->len);
1962 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1963 __qdisc_run(q);
1964 else
1965 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1966
1967 rc = NET_XMIT_SUCCESS;
1968 } else {
1969 rc = qdisc_enqueue_root(skb, q);
1970 qdisc_run(q);
1971 }
1972 spin_unlock(root_lock);
1973
1974 return rc;
1975}
1976
Dave Jonesd29f7492008-07-22 14:09:06 -07001977/**
1978 * dev_queue_xmit - transmit a buffer
1979 * @skb: buffer to transmit
1980 *
1981 * Queue a buffer for transmission to a network device. The caller must
1982 * have set the device and priority and built the buffer before calling
1983 * this function. The function can be called from an interrupt.
1984 *
1985 * A negative errno code is returned on a failure. A success does not
1986 * guarantee the frame will be transmitted as it may be dropped due
1987 * to congestion or traffic shaping.
1988 *
1989 * -----------------------------------------------------------------------------------
1990 * I notice this method can also return errors from the queue disciplines,
1991 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1992 * be positive.
1993 *
1994 * Regardless of the return value, the skb is consumed, so it is currently
1995 * difficult to retry a send to this method. (You can bump the ref count
1996 * before sending to hold a reference for retry if you are careful.)
1997 *
1998 * When calling this method, interrupts MUST be enabled. This is because
1999 * the BH enable code must have IRQs enabled so that it will not deadlock.
2000 * --BLG
2001 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002int dev_queue_xmit(struct sk_buff *skb)
2003{
2004 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002005 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 struct Qdisc *q;
2007 int rc = -ENOMEM;
2008
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002009 /* GSO will handle the following emulations directly. */
2010 if (netif_needs_gso(dev, skb))
2011 goto gso;
2012
David S. Miller4cf704f2009-06-09 00:18:51 -07002013 if (skb_has_frags(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07002015 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 goto out_kfree_skb;
2017
2018 /* Fragmented skb is linearized if device does not support SG,
2019	 * or if at least one of the fragments is in highmem and the device
2020 * does not support DMA from it.
2021 */
2022 if (skb_shinfo(skb)->nr_frags &&
2023 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07002024 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 goto out_kfree_skb;
2026
2027 /* If packet is not checksummed and device does not support
2028 * checksumming for this protocol, complete checksumming here.
2029 */
Herbert Xu663ead32007-04-09 11:59:07 -07002030 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2031 skb_set_transport_header(skb, skb->csum_start -
2032 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07002033 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
2034 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07002035 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002037gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002038 /* Disable soft irqs for various locks below. Also
2039 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002041 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042
David S. Millereae792b2008-07-15 03:03:33 -07002043 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07002044 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002045
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002047 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048#endif
2049 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002050 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002051 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 }
2053
2054 /* The device has no queue. Common case for software devices:
2055	   loopback, all sorts of tunnels...
2056
Herbert Xu932ff272006-06-09 12:20:56 -07002057 Really, it is unlikely that netif_tx_lock protection is necessary
2058	   here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 counters.)
2060	   However, it is possible that they rely on the protection
2061 made by us here.
2062
2063	   Check this and take the lock. It is not prone to deadlocks.
2064	   Either way, taking the lock for the noqueue qdisc is even simpler 8)
2065 */
2066 if (dev->flags & IFF_UP) {
2067 int cpu = smp_processor_id(); /* ok because BHs are off */
2068
David S. Millerc773e842008-07-08 23:13:53 -07002069 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070
David S. Millerc773e842008-07-08 23:13:53 -07002071 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002073 if (!netif_tx_queue_stopped(txq)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002074 rc = dev_hard_start_xmit(skb, dev, txq);
2075 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002076 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 goto out;
2078 }
2079 }
David S. Millerc773e842008-07-08 23:13:53 -07002080 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 if (net_ratelimit())
2082 printk(KERN_CRIT "Virtual device %s asks to "
2083 "queue packet!\n", dev->name);
2084 } else {
2085			/* Recursion has been detected! It is possible,
2086			 * unfortunately. */
2087 if (net_ratelimit())
2088 printk(KERN_CRIT "Dead loop on virtual device "
2089 "%s, fix it urgently!\n", dev->name);
2090 }
2091 }
2092
2093 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002094 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095
2096out_kfree_skb:
2097 kfree_skb(skb);
2098 return rc;
2099out:
Herbert Xud4828d82006-06-22 02:28:18 -07002100 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 return rc;
2102}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002103EXPORT_SYMBOL(dev_queue_xmit);
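
/*
 * Illustrative sketch: queueing a pre-built frame for transmission, as the
 * comment above requires.  Construction of the skb and its headers is
 * elided; the priority value chosen here is only an example.
 *
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	if (dev_queue_xmit(skb) < 0)
 *		pr_debug("frame could not be queued\n");
 */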
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104
2105
2106/*=======================================================================
2107 Receiver routines
2108 =======================================================================*/
2109
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002110int netdev_max_backlog __read_mostly = 1000;
2111int netdev_budget __read_mostly = 300;
2112int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
2114DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2115
2116
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117/**
2118 * netif_rx - post buffer to the network code
2119 * @skb: buffer to post
2120 *
2121 * This function receives a packet from a device driver and queues it for
2122 * the upper (protocol) levels to process. It always succeeds. The buffer
2123 * may be dropped during processing for congestion control or by the
2124 * protocol layers.
2125 *
2126 * return values:
2127 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 * NET_RX_DROP (packet was dropped)
2129 *
2130 */
2131
2132int netif_rx(struct sk_buff *skb)
2133{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 struct softnet_data *queue;
2135 unsigned long flags;
2136
2137 /* if netpoll wants it, pretend we never saw it */
2138 if (netpoll_rx(skb))
2139 return NET_RX_DROP;
2140
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002141 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002142 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143
2144 /*
2145	 * The code is rearranged so that the path is shortest
2146	 * when the CPU is congested but still operating.
2147 */
2148 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149 queue = &__get_cpu_var(softnet_data);
2150
2151 __get_cpu_var(netdev_rx_stat).total++;
2152 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2153 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07002157 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 }
2159
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002160 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 goto enqueue;
2162 }
2163
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 __get_cpu_var(netdev_rx_stat).dropped++;
2165 local_irq_restore(flags);
2166
2167 kfree_skb(skb);
2168 return NET_RX_DROP;
2169}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002170EXPORT_SYMBOL(netif_rx);
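
/*
 * Illustrative sketch: the classic non-NAPI receive path of a driver
 * interrupt handler handing a frame to netif_rx().  The pkt_len variable
 * and statistics handling are hypothetical.
 *
 *	skb_put(skb, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *	dev->stats.rx_packets++;
 *	dev->stats.rx_bytes += pkt_len;
 */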
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
2172int netif_rx_ni(struct sk_buff *skb)
2173{
2174 int err;
2175
2176 preempt_disable();
2177 err = netif_rx(skb);
2178 if (local_softirq_pending())
2179 do_softirq();
2180 preempt_enable();
2181
2182 return err;
2183}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184EXPORT_SYMBOL(netif_rx_ni);
2185
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186static void net_tx_action(struct softirq_action *h)
2187{
2188 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2189
2190 if (sd->completion_queue) {
2191 struct sk_buff *clist;
2192
2193 local_irq_disable();
2194 clist = sd->completion_queue;
2195 sd->completion_queue = NULL;
2196 local_irq_enable();
2197
2198 while (clist) {
2199 struct sk_buff *skb = clist;
2200 clist = clist->next;
2201
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002202 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 __kfree_skb(skb);
2204 }
2205 }
2206
2207 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002208 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
2210 local_irq_disable();
2211 head = sd->output_queue;
2212 sd->output_queue = NULL;
2213 local_irq_enable();
2214
2215 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002216 struct Qdisc *q = head;
2217 spinlock_t *root_lock;
2218
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219 head = head->next_sched;
2220
David S. Miller5fb66222008-08-02 20:02:43 -07002221 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002222 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002223 smp_mb__before_clear_bit();
2224 clear_bit(__QDISC_STATE_SCHED,
2225 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002226 qdisc_run(q);
2227 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002229 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002230 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002231 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002232 } else {
2233 smp_mb__before_clear_bit();
2234 clear_bit(__QDISC_STATE_SCHED,
2235 &q->state);
2236 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 }
2238 }
2239 }
2240}
2241
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002242static inline int deliver_skb(struct sk_buff *skb,
2243 struct packet_type *pt_prev,
2244 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245{
2246 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002247 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248}
2249
2250#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002251
2252#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2253/* This hook is defined here for ATM LANE */
2254int (*br_fdb_test_addr_hook)(struct net_device *dev,
2255 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002256EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002257#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
Stephen Hemminger6229e362007-03-21 13:38:47 -07002259/*
2260 * If bridge module is loaded call bridging hook.
2261 * returns NULL if packet was consumed.
2262 */
2263struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2264 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002265EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002266
Stephen Hemminger6229e362007-03-21 13:38:47 -07002267static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2268 struct packet_type **pt_prev, int *ret,
2269 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270{
2271 struct net_bridge_port *port;
2272
Stephen Hemminger6229e362007-03-21 13:38:47 -07002273 if (skb->pkt_type == PACKET_LOOPBACK ||
2274 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2275 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
2277 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002278 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002280 }
2281
Stephen Hemminger6229e362007-03-21 13:38:47 -07002282 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283}
2284#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002285#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286#endif
2287
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002288#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2289struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2290EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2291
2292static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2293 struct packet_type **pt_prev,
2294 int *ret,
2295 struct net_device *orig_dev)
2296{
2297 if (skb->dev->macvlan_port == NULL)
2298 return skb;
2299
2300 if (*pt_prev) {
2301 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2302 *pt_prev = NULL;
2303 }
2304 return macvlan_handle_frame_hook(skb);
2305}
2306#else
2307#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2308#endif
2309
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310#ifdef CONFIG_NET_CLS_ACT
2311/* TODO: Maybe we should just force sch_ingress to be compiled in
2312 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
2313 * instructions (a compare and 2 extra stores) right now if we don't
2314 * have it enabled but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002315 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 * the ingress scheduler, you just can't add policies on ingress.
2317 *
2318 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002319static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002322 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002323 struct netdev_queue *rxq;
2324 int result = TC_ACT_OK;
2325 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002326
Herbert Xuf697c3e2007-10-14 00:38:47 -07002327 if (MAX_RED_LOOP < ttl++) {
2328 printk(KERN_WARNING
2329 "Redir loop detected Dropping packet (%d->%d)\n",
Eric Dumazet8964be42009-11-20 15:35:04 -08002330 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07002331 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332 }
2333
Herbert Xuf697c3e2007-10-14 00:38:47 -07002334 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2335 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2336
David S. Miller555353c2008-07-08 17:33:13 -07002337 rxq = &dev->rx_queue;
2338
David S. Miller83874002008-07-17 00:53:03 -07002339 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002340 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002341 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002342 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2343 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002344 spin_unlock(qdisc_lock(q));
2345 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002346
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347 return result;
2348}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002349
2350static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2351 struct packet_type **pt_prev,
2352 int *ret, struct net_device *orig_dev)
2353{
David S. Miller8d50b532008-07-30 02:37:46 -07002354 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002355 goto out;
2356
2357 if (*pt_prev) {
2358 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2359 *pt_prev = NULL;
2360 } else {
2361 /* Huh? Why does turning on AF_PACKET affect this? */
2362 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2363 }
2364
2365 switch (ing_filter(skb)) {
2366 case TC_ACT_SHOT:
2367 case TC_ACT_STOLEN:
2368 kfree_skb(skb);
2369 return NULL;
2370 }
2371
2372out:
2373 skb->tc_verd = 0;
2374 return skb;
2375}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376#endif
2377
Patrick McHardybc1d0412008-07-14 22:49:30 -07002378/*
2379 * netif_nit_deliver - deliver received packets to network taps
2380 * @skb: buffer
2381 *
2382 * This function is used to deliver incoming packets to network
2383 * taps. It should be used when the normal netif_receive_skb path
2384 * is bypassed, for example because of VLAN acceleration.
2385 */
2386void netif_nit_deliver(struct sk_buff *skb)
2387{
2388 struct packet_type *ptype;
2389
2390 if (list_empty(&ptype_all))
2391 return;
2392
2393 skb_reset_network_header(skb);
2394 skb_reset_transport_header(skb);
2395 skb->mac_len = skb->network_header - skb->mac_header;
2396
2397 rcu_read_lock();
2398 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2399 if (!ptype->dev || ptype->dev == skb->dev)
2400 deliver_skb(skb, ptype, skb->dev);
2401 }
2402 rcu_read_unlock();
2403}
2404
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002405/**
2406 * netif_receive_skb - process receive buffer from network
2407 * @skb: buffer to process
2408 *
2409 * netif_receive_skb() is the main receive data processing function.
2410 * It always succeeds. The buffer may be dropped during processing
2411 * for congestion control or by the protocol layers.
2412 *
2413 * This function may only be called from softirq context and interrupts
2414 * should be enabled.
2415 *
2416 * Return values (usually ignored):
2417 * NET_RX_SUCCESS: no congestion
2418 * NET_RX_DROP: packet was dropped
2419 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420int netif_receive_skb(struct sk_buff *skb)
2421{
2422 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002423 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002424 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002426 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002428 if (!skb->tstamp.tv64)
2429 net_timestamp(skb);
2430
Eric Dumazet05423b22009-10-26 18:40:35 -07002431 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002432 return NET_RX_SUCCESS;
2433
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002435 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 return NET_RX_DROP;
2437
Eric Dumazet8964be42009-11-20 15:35:04 -08002438 if (!skb->skb_iif)
2439 skb->skb_iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002440
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002441 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002442 orig_dev = skb->dev;
2443 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002444 if (skb_bond_should_drop(skb))
2445 null_or_orig = orig_dev; /* deliver only exact match */
2446 else
2447 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002448 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002449
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 __get_cpu_var(netdev_rx_stat).total++;
2451
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002452 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002453 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002454 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455
2456 pt_prev = NULL;
2457
2458 rcu_read_lock();
2459
2460#ifdef CONFIG_NET_CLS_ACT
2461 if (skb->tc_verd & TC_NCLS) {
2462 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2463 goto ncls;
2464 }
2465#endif
2466
2467 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002468 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2469 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002470 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002471 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 pt_prev = ptype;
2473 }
2474 }
2475
2476#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002477 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2478 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480ncls:
2481#endif
2482
Stephen Hemminger6229e362007-03-21 13:38:47 -07002483 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2484 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002486 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2487 if (!skb)
2488 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489
2490 type = skb->protocol;
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08002491 list_for_each_entry_rcu(ptype,
2492 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002494 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2495 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002496 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002497 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 pt_prev = ptype;
2499 }
2500 }
2501
2502 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002503 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 } else {
2505 kfree_skb(skb);
2506		/* Jamal, now you will not be able to escape explaining
2507		 * to me how you were going to use this. :-)
2508 */
2509 ret = NET_RX_DROP;
2510 }
2511
2512out:
2513 rcu_read_unlock();
2514 return ret;
2515}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002516EXPORT_SYMBOL(netif_receive_skb);
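
/*
 * Illustrative sketch: a NAPI poll routine handing completed frames to
 * netif_receive_skb().  Descriptor handling and the example_* helpers are
 * hypothetical.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct net_device *dev = napi->dev;
 *		int work = 0;
 *
 *		while (work < budget && example_rx_frame_ready(dev)) {
 *			struct sk_buff *skb = example_rx_build_skb(dev);
 *
 *			skb->protocol = eth_type_trans(skb, dev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */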
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002518/* Network device is going away, flush any packets still pending */
2519static void flush_backlog(void *arg)
2520{
2521 struct net_device *dev = arg;
2522 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2523 struct sk_buff *skb, *tmp;
2524
2525 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2526 if (skb->dev == dev) {
2527 __skb_unlink(skb, &queue->input_pkt_queue);
2528 kfree_skb(skb);
2529 }
2530}
2531
Herbert Xud565b0a2008-12-15 23:38:52 -08002532static int napi_gro_complete(struct sk_buff *skb)
2533{
2534 struct packet_type *ptype;
2535 __be16 type = skb->protocol;
2536 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2537 int err = -ENOENT;
2538
Herbert Xufc59f9a2009-04-14 15:11:06 -07002539 if (NAPI_GRO_CB(skb)->count == 1) {
2540 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002541 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002542 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002543
2544 rcu_read_lock();
2545 list_for_each_entry_rcu(ptype, head, list) {
2546 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2547 continue;
2548
2549 err = ptype->gro_complete(skb);
2550 break;
2551 }
2552 rcu_read_unlock();
2553
2554 if (err) {
2555 WARN_ON(&ptype->list == head);
2556 kfree_skb(skb);
2557 return NET_RX_SUCCESS;
2558 }
2559
2560out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002561 return netif_receive_skb(skb);
2562}
2563
2564void napi_gro_flush(struct napi_struct *napi)
2565{
2566 struct sk_buff *skb, *next;
2567
2568 for (skb = napi->gro_list; skb; skb = next) {
2569 next = skb->next;
2570 skb->next = NULL;
2571 napi_gro_complete(skb);
2572 }
2573
Herbert Xu4ae55442009-02-08 18:00:36 +00002574 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002575 napi->gro_list = NULL;
2576}
2577EXPORT_SYMBOL(napi_gro_flush);
2578
Ben Hutchings5b252f02009-10-29 07:17:09 +00002579enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002580{
2581 struct sk_buff **pp = NULL;
2582 struct packet_type *ptype;
2583 __be16 type = skb->protocol;
2584 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd2008-12-26 14:57:42 -08002585 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002586 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002587 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002588
2589 if (!(skb->dev->features & NETIF_F_GRO))
2590 goto normal;
2591
David S. Miller4cf704f2009-06-09 00:18:51 -07002592 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002593 goto normal;
2594
Herbert Xud565b0a2008-12-15 23:38:52 -08002595 rcu_read_lock();
2596 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002597 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2598 continue;
2599
Herbert Xu86911732009-01-29 14:19:50 +00002600 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002601 mac_len = skb->network_header - skb->mac_header;
2602 skb->mac_len = mac_len;
2603 NAPI_GRO_CB(skb)->same_flow = 0;
2604 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002605 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002606
Herbert Xud565b0a2008-12-15 23:38:52 -08002607 pp = ptype->gro_receive(&napi->gro_list, skb);
2608 break;
2609 }
2610 rcu_read_unlock();
2611
2612 if (&ptype->list == head)
2613 goto normal;
2614
Herbert Xu0da2afd2008-12-26 14:57:42 -08002615 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002616 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd2008-12-26 14:57:42 -08002617
Herbert Xud565b0a2008-12-15 23:38:52 -08002618 if (pp) {
2619 struct sk_buff *nskb = *pp;
2620
2621 *pp = nskb->next;
2622 nskb->next = NULL;
2623 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002624 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002625 }
2626
Herbert Xu0da2afd2008-12-26 14:57:42 -08002627 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002628 goto ok;
2629
Herbert Xu4ae55442009-02-08 18:00:36 +00002630 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002631 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002632
Herbert Xu4ae55442009-02-08 18:00:36 +00002633 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002634 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002635 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002636 skb->next = napi->gro_list;
2637 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002638 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002639
Herbert Xuad0f9902009-02-01 01:24:55 -08002640pull:
Herbert Xucb189782009-05-26 18:50:31 +00002641 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2642 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2643
2644 BUG_ON(skb->end - skb->tail < grow);
2645
2646 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2647
2648 skb->tail += grow;
2649 skb->data_len -= grow;
2650
2651 skb_shinfo(skb)->frags[0].page_offset += grow;
2652 skb_shinfo(skb)->frags[0].size -= grow;
2653
2654 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2655 put_page(skb_shinfo(skb)->frags[0].page);
2656 memmove(skb_shinfo(skb)->frags,
2657 skb_shinfo(skb)->frags + 1,
 2658 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2659 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002660 }
2661
Herbert Xud565b0a2008-12-15 23:38:52 -08002662ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002663 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002664
2665normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002666 ret = GRO_NORMAL;
2667 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002668}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002669EXPORT_SYMBOL(dev_gro_receive);
2670
Ben Hutchings5b252f02009-10-29 07:17:09 +00002671static gro_result_t
2672__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002673{
2674 struct sk_buff *p;
2675
Herbert Xud1c76af2009-03-16 10:50:02 -07002676 if (netpoll_rx_on(skb))
2677 return GRO_NORMAL;
2678
Herbert Xu96e93ea2009-01-06 10:49:34 -08002679 for (p = napi->gro_list; p; p = p->next) {
Joe Perchesf64f9e72009-11-29 16:55:45 -08002680 NAPI_GRO_CB(p)->same_flow =
2681 (p->dev == skb->dev) &&
2682 !compare_ether_header(skb_mac_header(p),
2683 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002684 NAPI_GRO_CB(p)->flush = 0;
2685 }
2686
2687 return dev_gro_receive(napi, skb);
2688}
Herbert Xu5d38a072009-01-04 16:13:40 -08002689
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002690gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002691{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002692 switch (ret) {
2693 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002694 if (netif_receive_skb(skb))
2695 ret = GRO_DROP;
2696 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002697
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002698 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002699 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002700 kfree_skb(skb);
2701 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002702
2703 case GRO_HELD:
2704 case GRO_MERGED:
2705 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002706 }
2707
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002708 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002709}
2710EXPORT_SYMBOL(napi_skb_finish);
2711
Herbert Xu78a478d2009-05-26 18:50:21 +00002712void skb_gro_reset_offset(struct sk_buff *skb)
2713{
2714 NAPI_GRO_CB(skb)->data_offset = 0;
2715 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002716 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002717
Herbert Xu78d3fd02009-05-26 18:50:23 +00002718 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002719 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002720 NAPI_GRO_CB(skb)->frag0 =
2721 page_address(skb_shinfo(skb)->frags[0].page) +
2722 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002723 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2724 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002725}
2726EXPORT_SYMBOL(skb_gro_reset_offset);
2727
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002728gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002729{
Herbert Xu86911732009-01-29 14:19:50 +00002730 skb_gro_reset_offset(skb);
2731
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002732 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002733}
2734EXPORT_SYMBOL(napi_gro_receive);
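
/*
 * Illustrative sketch (not part of the original source): how a typical
 * NAPI driver's poll() callback might feed received packets into GRO via
 * napi_gro_receive() and complete the poll when the budget is not
 * exhausted.  my_poll(), my_rx_fetch() and struct my_priv are hypothetical
 * names; the sketch is kept inside a comment and is not built.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = my_rx_fetch(priv);
 *			if (!skb)
 *				break;
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 *
 * napi_complete() above also flushes the held gro_list via napi_gro_flush().
 */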
2735
Herbert Xu96e93ea2009-01-06 10:49:34 -08002736void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2737{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002738 __skb_pull(skb, skb_headlen(skb));
2739 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2740
2741 napi->skb = skb;
2742}
2743EXPORT_SYMBOL(napi_reuse_skb);
2744
Herbert Xu76620aa2009-04-16 02:02:07 -07002745struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002746{
Herbert Xu5d38a072009-01-04 16:13:40 -08002747 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002748
2749 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00002750 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2751 if (skb)
2752 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002753 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08002754 return skb;
2755}
Herbert Xu76620aa2009-04-16 02:02:07 -07002756EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002757
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002758gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2759 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002760{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002761 switch (ret) {
2762 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002763 case GRO_HELD:
Ajit Khapardec4d49792010-02-16 20:25:43 +00002764 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00002765
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002766 if (ret == GRO_HELD)
2767 skb_gro_pull(skb, -ETH_HLEN);
2768 else if (netif_receive_skb(skb))
2769 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00002770 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002771
2772 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002773 case GRO_MERGED_FREE:
2774 napi_reuse_skb(napi, skb);
2775 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002776
2777 case GRO_MERGED:
2778 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002779 }
2780
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002781 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002782}
2783EXPORT_SYMBOL(napi_frags_finish);
2784
Herbert Xu76620aa2009-04-16 02:02:07 -07002785struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002786{
Herbert Xu76620aa2009-04-16 02:02:07 -07002787 struct sk_buff *skb = napi->skb;
2788 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002789 unsigned int hlen;
2790 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002791
2792 napi->skb = NULL;
2793
2794 skb_reset_mac_header(skb);
2795 skb_gro_reset_offset(skb);
2796
Herbert Xua5b1cf22009-05-26 18:50:28 +00002797 off = skb_gro_offset(skb);
2798 hlen = off + sizeof(*eth);
2799 eth = skb_gro_header_fast(skb, off);
2800 if (skb_gro_header_hard(skb, hlen)) {
2801 eth = skb_gro_header_slow(skb, hlen, off);
2802 if (unlikely(!eth)) {
2803 napi_reuse_skb(napi, skb);
2804 skb = NULL;
2805 goto out;
2806 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002807 }
2808
2809 skb_gro_pull(skb, sizeof(*eth));
2810
2811 /*
2812 * This works because the only protocols we care about don't require
2813 * special handling. We'll fix it up properly at the end.
2814 */
2815 skb->protocol = eth->h_proto;
2816
2817out:
2818 return skb;
2819}
2820EXPORT_SYMBOL(napi_frags_skb);
2821
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002822gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07002823{
2824 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002825
2826 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002827 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002828
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002829 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002830}
2831EXPORT_SYMBOL(napi_gro_frags);
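
/*
 * Illustrative sketch (not part of the original source): the page-fragment
 * GRO path used by drivers that receive directly into pages.  The driver
 * borrows the per-NAPI skb with napi_get_frags(), attaches its fragments
 * and hands the skb to napi_gro_frags(), which parses the Ethernet header
 * itself.  The page/offset/len values come from the hypothetical driver.
 *
 *	skb = napi_get_frags(napi);
 *	if (!skb)
 *		return;                       (allocation failure: drop buffer)
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += len;
 *	napi_gro_frags(napi);
 */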
2832
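/*
 * process_backlog() is the poll callback of the per-CPU backlog NAPI
 * instance.  Packets queued by netif_rx() (the non-NAPI receive path) sit
 * on softnet_data.input_pkt_queue and are drained here, at most @quota per
 * round and only within the jiffy in which the poll started.
 */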
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002833static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834{
2835 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2837 unsigned long start_time = jiffies;
2838
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002839 napi->weight = weight_p;
2840 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842
2843 local_irq_disable();
2844 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002845 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002846 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002847 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002848 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002849 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850 local_irq_enable();
2851
Herbert Xu8f1ead22009-03-26 00:59:10 -07002852 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002853 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002855 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856}
2857
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002858/**
2859 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002860 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002861 *
2862 * The entry's receive function will be scheduled to run
2863 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002864void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002865{
2866 unsigned long flags;
2867
2868 local_irq_save(flags);
2869 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2870 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2871 local_irq_restore(flags);
2872}
2873EXPORT_SYMBOL(__napi_schedule);
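
/*
 * Illustrative sketch (not part of the original source): a device interrupt
 * handler normally uses napi_schedule(), which only falls through to
 * __napi_schedule() once napi_schedule_prep() has atomically claimed
 * NAPI_STATE_SCHED.  my_irq(), my_disable_rx_irq() and struct my_priv are
 * hypothetical.
 *
 *	static irqreturn_t my_irq(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		my_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */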
2874
Herbert Xud565b0a2008-12-15 23:38:52 -08002875void __napi_complete(struct napi_struct *n)
2876{
2877 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2878 BUG_ON(n->gro_list);
2879
2880 list_del(&n->poll_list);
2881 smp_mb__before_clear_bit();
2882 clear_bit(NAPI_STATE_SCHED, &n->state);
2883}
2884EXPORT_SYMBOL(__napi_complete);
2885
2886void napi_complete(struct napi_struct *n)
2887{
2888 unsigned long flags;
2889
2890 /*
2891 * don't let napi dequeue from the cpu poll list
 2892 * just in case it's running on a different cpu
2893 */
2894 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2895 return;
2896
2897 napi_gro_flush(n);
2898 local_irq_save(flags);
2899 __napi_complete(n);
2900 local_irq_restore(flags);
2901}
2902EXPORT_SYMBOL(napi_complete);
2903
2904void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2905 int (*poll)(struct napi_struct *, int), int weight)
2906{
2907 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002908 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002909 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002910 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002911 napi->poll = poll;
2912 napi->weight = weight;
2913 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002914 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002915#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002916 spin_lock_init(&napi->poll_lock);
2917 napi->poll_owner = -1;
2918#endif
2919 set_bit(NAPI_STATE_SCHED, &napi->state);
2920}
2921EXPORT_SYMBOL(netif_napi_add);
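
/*
 * Illustrative sketch (not part of the original source): drivers register
 * their poll handler at probe time and tear it down before freeing the
 * netdev.  A weight of 64 is the conventional Ethernet value; my_poll()
 * and priv are hypothetical.
 *
 *	netif_napi_add(netdev, &priv->napi, my_poll, 64);
 *	...
 *	netif_napi_del(&priv->napi);
 */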
2922
2923void netif_napi_del(struct napi_struct *napi)
2924{
2925 struct sk_buff *skb, *next;
2926
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002927 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002928 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002929
2930 for (skb = napi->gro_list; skb; skb = next) {
2931 next = skb->next;
2932 skb->next = NULL;
2933 kfree_skb(skb);
2934 }
2935
2936 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002937 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002938}
2939EXPORT_SYMBOL(netif_napi_del);
2940
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002941
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942static void net_rx_action(struct softirq_action *h)
2943{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002944 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002945 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002946 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002947 void *have;
2948
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949 local_irq_disable();
2950
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002951 while (!list_empty(list)) {
2952 struct napi_struct *n;
2953 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002955 /* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002956 * Allow this to run for 2 jiffies since that allows
2957 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002958 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002959 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960 goto softnet_break;
2961
2962 local_irq_enable();
2963
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002964 /* Even though interrupts have been re-enabled, this
2965 * access is safe because interrupts can only add new
2966 * entries to the tail of this list, and only ->poll()
2967 * calls can remove this head entry from the list.
2968 */
2969 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002971 have = netpoll_poll_lock(n);
2972
2973 weight = n->weight;
2974
David S. Miller0a7606c2007-10-29 21:28:47 -07002975 /* This NAPI_STATE_SCHED test is for avoiding a race
2976 * with netpoll's poll_napi(). Only the entity which
2977 * obtains the lock and sees NAPI_STATE_SCHED set will
2978 * actually make the ->poll() call. Therefore we avoid
 2979 * accidentally calling ->poll() when NAPI is not scheduled.
2980 */
2981 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002982 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002983 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002984 trace_napi_poll(n);
2985 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002986
2987 WARN_ON_ONCE(work > weight);
2988
2989 budget -= work;
2990
2991 local_irq_disable();
2992
2993 /* Drivers must not modify the NAPI state if they
2994 * consume the entire weight. In such cases this code
2995 * still "owns" the NAPI instance and therefore can
2996 * move the instance around on the list at-will.
2997 */
David S. Millerfed17f32008-01-07 21:00:40 -08002998 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07002999 if (unlikely(napi_disable_pending(n))) {
3000 local_irq_enable();
3001 napi_complete(n);
3002 local_irq_disable();
3003 } else
David S. Millerfed17f32008-01-07 21:00:40 -08003004 list_move_tail(&n->poll_list, list);
3005 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003006
3007 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008 }
3009out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07003010 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003011
Chris Leechdb217332006-06-17 21:24:58 -07003012#ifdef CONFIG_NET_DMA
3013 /*
3014 * There may not be any more sk_buffs coming right now, so push
3015 * any pending DMA copies to hardware
3016 */
Dan Williams2ba05622009-01-06 11:38:14 -07003017 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07003018#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003019
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020 return;
3021
3022softnet_break:
3023 __get_cpu_var(netdev_rx_stat).time_squeeze++;
3024 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3025 goto out;
3026}
3027
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003028static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029
3030/**
3031 * register_gifconf - register a SIOCGIF handler
3032 * @family: Address family
3033 * @gifconf: Function handler
3034 *
3035 * Register protocol dependent address dumping routines. The handler
3036 * that is passed must not be freed or reused until it has been replaced
3037 * by another handler.
3038 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003039int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040{
3041 if (family >= NPROTO)
3042 return -EINVAL;
3043 gifconf_list[family] = gifconf;
3044 return 0;
3045}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003046EXPORT_SYMBOL(register_gifconf);
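
/*
 * Illustrative sketch (not part of the original source): an address family
 * hooks its SIOCGIFCONF dumper at init time, roughly as IPv4 does for
 * PF_INET; inet_gifconf() stands in for the per-family handler.
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */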
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047
3048
3049/*
3050 * Map an interface index to its name (SIOCGIFNAME)
3051 */
3052
3053/*
3054 * We need this ioctl for efficient implementation of the
3055 * if_indextoname() function required by the IPv6 API. Without
3056 * it, we would have to search all the interfaces to find a
3057 * match. --pb
3058 */
3059
Eric W. Biederman881d9662007-09-17 11:56:21 -07003060static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003061{
3062 struct net_device *dev;
3063 struct ifreq ifr;
3064
3065 /*
3066 * Fetch the caller's info block.
3067 */
3068
3069 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3070 return -EFAULT;
3071
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003072 rcu_read_lock();
3073 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003075 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076 return -ENODEV;
3077 }
3078
3079 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003080 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081
3082 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3083 return -EFAULT;
3084 return 0;
3085}
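
/*
 * Illustrative sketch (not part of the original source): the userspace side
 * of this ioctl, as used by if_indextoname()-style lookups.  sockfd and
 * index are assumed to be set up by the caller.
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = index;
 *	if (ioctl(sockfd, SIOCGIFNAME, &ifr) == 0)
 *		printf("%s\n", ifr.ifr_name);
 */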
3086
3087/*
3088 * Perform a SIOCGIFCONF call. This structure will change
3089 * size eventually, and there is nothing I can do about it.
3090 * Thus we will need a 'compatibility mode'.
3091 */
3092
Eric W. Biederman881d9662007-09-17 11:56:21 -07003093static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094{
3095 struct ifconf ifc;
3096 struct net_device *dev;
3097 char __user *pos;
3098 int len;
3099 int total;
3100 int i;
3101
3102 /*
3103 * Fetch the caller's info block.
3104 */
3105
3106 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3107 return -EFAULT;
3108
3109 pos = ifc.ifc_buf;
3110 len = ifc.ifc_len;
3111
3112 /*
3113 * Loop over the interfaces, and write an info block for each.
3114 */
3115
3116 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003117 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003118 for (i = 0; i < NPROTO; i++) {
3119 if (gifconf_list[i]) {
3120 int done;
3121 if (!pos)
3122 done = gifconf_list[i](dev, NULL, 0);
3123 else
3124 done = gifconf_list[i](dev, pos + total,
3125 len - total);
3126 if (done < 0)
3127 return -EFAULT;
3128 total += done;
3129 }
3130 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003131 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132
3133 /*
3134 * All done. Write the updated control block back to the caller.
3135 */
3136 ifc.ifc_len = total;
3137
3138 /*
3139 * Both BSD and Solaris return 0 here, so we do too.
3140 */
3141 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3142}
3143
3144#ifdef CONFIG_PROC_FS
3145/*
3146 * This is invoked by the /proc filesystem handler to display a device
3147 * in detail.
3148 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003150 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151{
Denis V. Luneve372c412007-11-19 22:31:54 -08003152 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003153 loff_t off;
3154 struct net_device *dev;
3155
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003156 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07003157 if (!*pos)
3158 return SEQ_START_TOKEN;
3159
3160 off = 1;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003161 for_each_netdev_rcu(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003162 if (off++ == *pos)
3163 return dev;
3164
3165 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166}
3167
3168void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3169{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003170 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3171 first_net_device(seq_file_net(seq)) :
3172 next_net_device((struct net_device *)v);
3173
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174 ++*pos;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003175 return rcu_dereference(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176}
3177
3178void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003179 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003181 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003182}
3183
3184static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3185{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003186 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187
Rusty Russell5a1b5892007-04-28 21:04:03 -07003188 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3189 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3190 dev->name, stats->rx_bytes, stats->rx_packets,
3191 stats->rx_errors,
3192 stats->rx_dropped + stats->rx_missed_errors,
3193 stats->rx_fifo_errors,
3194 stats->rx_length_errors + stats->rx_over_errors +
3195 stats->rx_crc_errors + stats->rx_frame_errors,
3196 stats->rx_compressed, stats->multicast,
3197 stats->tx_bytes, stats->tx_packets,
3198 stats->tx_errors, stats->tx_dropped,
3199 stats->tx_fifo_errors, stats->collisions,
3200 stats->tx_carrier_errors +
3201 stats->tx_aborted_errors +
3202 stats->tx_window_errors +
3203 stats->tx_heartbeat_errors,
3204 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205}
3206
3207/*
3208 * Called from the PROCfs module. This now uses the new arbitrary sized
3209 * /proc/net interface to create /proc/net/dev
3210 */
3211static int dev_seq_show(struct seq_file *seq, void *v)
3212{
3213 if (v == SEQ_START_TOKEN)
3214 seq_puts(seq, "Inter-| Receive "
3215 " | Transmit\n"
3216 " face |bytes packets errs drop fifo frame "
3217 "compressed multicast|bytes packets errs "
3218 "drop fifo colls carrier compressed\n");
3219 else
3220 dev_seq_printf_stats(seq, v);
3221 return 0;
3222}
3223
3224static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3225{
3226 struct netif_rx_stats *rc = NULL;
3227
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003228 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003229 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230 rc = &per_cpu(netdev_rx_stat, *pos);
3231 break;
3232 } else
3233 ++*pos;
3234 return rc;
3235}
3236
3237static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3238{
3239 return softnet_get_online(pos);
3240}
3241
3242static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3243{
3244 ++*pos;
3245 return softnet_get_online(pos);
3246}
3247
3248static void softnet_seq_stop(struct seq_file *seq, void *v)
3249{
3250}
3251
3252static int softnet_seq_show(struct seq_file *seq, void *v)
3253{
3254 struct netif_rx_stats *s = v;
3255
3256 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003257 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003258 0, 0, 0, 0, /* was fastroute */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003259 s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260 return 0;
3261}
3262
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003263static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264 .start = dev_seq_start,
3265 .next = dev_seq_next,
3266 .stop = dev_seq_stop,
3267 .show = dev_seq_show,
3268};
3269
3270static int dev_seq_open(struct inode *inode, struct file *file)
3271{
Denis V. Luneve372c412007-11-19 22:31:54 -08003272 return seq_open_net(inode, file, &dev_seq_ops,
3273 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274}
3275
Arjan van de Ven9a321442007-02-12 00:55:35 -08003276static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277 .owner = THIS_MODULE,
3278 .open = dev_seq_open,
3279 .read = seq_read,
3280 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003281 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282};
3283
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003284static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285 .start = softnet_seq_start,
3286 .next = softnet_seq_next,
3287 .stop = softnet_seq_stop,
3288 .show = softnet_seq_show,
3289};
3290
3291static int softnet_seq_open(struct inode *inode, struct file *file)
3292{
3293 return seq_open(file, &softnet_seq_ops);
3294}
3295
Arjan van de Ven9a321442007-02-12 00:55:35 -08003296static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297 .owner = THIS_MODULE,
3298 .open = softnet_seq_open,
3299 .read = seq_read,
3300 .llseek = seq_lseek,
3301 .release = seq_release,
3302};
3303
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003304static void *ptype_get_idx(loff_t pos)
3305{
3306 struct packet_type *pt = NULL;
3307 loff_t i = 0;
3308 int t;
3309
3310 list_for_each_entry_rcu(pt, &ptype_all, list) {
3311 if (i == pos)
3312 return pt;
3313 ++i;
3314 }
3315
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08003316 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003317 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3318 if (i == pos)
3319 return pt;
3320 ++i;
3321 }
3322 }
3323 return NULL;
3324}
3325
3326static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003327 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003328{
3329 rcu_read_lock();
3330 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3331}
3332
3333static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3334{
3335 struct packet_type *pt;
3336 struct list_head *nxt;
3337 int hash;
3338
3339 ++*pos;
3340 if (v == SEQ_START_TOKEN)
3341 return ptype_get_idx(0);
3342
3343 pt = v;
3344 nxt = pt->list.next;
3345 if (pt->type == htons(ETH_P_ALL)) {
3346 if (nxt != &ptype_all)
3347 goto found;
3348 hash = 0;
3349 nxt = ptype_base[0].next;
3350 } else
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08003351 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003352
3353 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08003354 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003355 return NULL;
3356 nxt = ptype_base[hash].next;
3357 }
3358found:
3359 return list_entry(nxt, struct packet_type, list);
3360}
3361
3362static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003363 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003364{
3365 rcu_read_unlock();
3366}
3367
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003368static int ptype_seq_show(struct seq_file *seq, void *v)
3369{
3370 struct packet_type *pt = v;
3371
3372 if (v == SEQ_START_TOKEN)
3373 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003374 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003375 if (pt->type == htons(ETH_P_ALL))
3376 seq_puts(seq, "ALL ");
3377 else
3378 seq_printf(seq, "%04x", ntohs(pt->type));
3379
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003380 seq_printf(seq, " %-8s %pF\n",
3381 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003382 }
3383
3384 return 0;
3385}
3386
3387static const struct seq_operations ptype_seq_ops = {
3388 .start = ptype_seq_start,
3389 .next = ptype_seq_next,
3390 .stop = ptype_seq_stop,
3391 .show = ptype_seq_show,
3392};
3393
3394static int ptype_seq_open(struct inode *inode, struct file *file)
3395{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003396 return seq_open_net(inode, file, &ptype_seq_ops,
3397 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003398}
3399
3400static const struct file_operations ptype_seq_fops = {
3401 .owner = THIS_MODULE,
3402 .open = ptype_seq_open,
3403 .read = seq_read,
3404 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003405 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003406};
3407
3408
Pavel Emelyanov46650792007-10-08 20:38:39 -07003409static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410{
3411 int rc = -ENOMEM;
3412
Eric W. Biederman881d9662007-09-17 11:56:21 -07003413 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003415 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003417 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003418 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003419
Eric W. Biederman881d9662007-09-17 11:56:21 -07003420 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003421 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422 rc = 0;
3423out:
3424 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003425out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003426 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003428 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003430 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431 goto out;
3432}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003433
Pavel Emelyanov46650792007-10-08 20:38:39 -07003434static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003435{
3436 wext_proc_exit(net);
3437
3438 proc_net_remove(net, "ptype");
3439 proc_net_remove(net, "softnet_stat");
3440 proc_net_remove(net, "dev");
3441}
3442
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003443static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003444 .init = dev_proc_net_init,
3445 .exit = dev_proc_net_exit,
3446};
3447
3448static int __init dev_proc_init(void)
3449{
3450 return register_pernet_subsys(&dev_proc_ops);
3451}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452#else
3453#define dev_proc_init() 0
3454#endif /* CONFIG_PROC_FS */
3455
3456
3457/**
3458 * netdev_set_master - set up master/slave pair
3459 * @slave: slave device
3460 * @master: new master device
3461 *
3462 * Changes the master device of the slave. Pass %NULL to break the
3463 * bonding. The caller must hold the RTNL semaphore. On a failure
3464 * a negative errno code is returned. On success the reference counts
3465 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3466 * function returns zero.
3467 */
3468int netdev_set_master(struct net_device *slave, struct net_device *master)
3469{
3470 struct net_device *old = slave->master;
3471
3472 ASSERT_RTNL();
3473
3474 if (master) {
3475 if (old)
3476 return -EBUSY;
3477 dev_hold(master);
3478 }
3479
3480 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003481
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482 synchronize_net();
3483
3484 if (old)
3485 dev_put(old);
3486
3487 if (master)
3488 slave->flags |= IFF_SLAVE;
3489 else
3490 slave->flags &= ~IFF_SLAVE;
3491
3492 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3493 return 0;
3494}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003495EXPORT_SYMBOL(netdev_set_master);
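
/*
 * Illustrative sketch (not part of the original source): this is the helper
 * bonding-style drivers use while enslaving and releasing devices, always
 * under the RTNL lock.  bond_dev and slave_dev are hypothetical.
 *
 *	rtnl_lock();
 *	err = netdev_set_master(slave_dev, bond_dev);   (enslave)
 *	...
 *	netdev_set_master(slave_dev, NULL);             (release)
 *	rtnl_unlock();
 */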
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003497static void dev_change_rx_flags(struct net_device *dev, int flags)
3498{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003499 const struct net_device_ops *ops = dev->netdev_ops;
3500
3501 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3502 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003503}
3504
Wang Chendad9b332008-06-18 01:48:28 -07003505static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003506{
3507 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003508 uid_t uid;
3509 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003510
Patrick McHardy24023452007-07-14 18:51:31 -07003511 ASSERT_RTNL();
3512
Wang Chendad9b332008-06-18 01:48:28 -07003513 dev->flags |= IFF_PROMISC;
3514 dev->promiscuity += inc;
3515 if (dev->promiscuity == 0) {
3516 /*
3517 * Avoid overflow.
3518 * If inc causes overflow, untouch promisc and return error.
3519 */
3520 if (inc < 0)
3521 dev->flags &= ~IFF_PROMISC;
3522 else {
3523 dev->promiscuity -= inc;
3524 printk(KERN_WARNING "%s: promiscuity touches roof, "
3525 "set promiscuity failed, promiscuity feature "
3526 "of device might be broken.\n", dev->name);
3527 return -EOVERFLOW;
3528 }
3529 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003530 if (dev->flags != old_flags) {
3531 printk(KERN_INFO "device %s %s promiscuous mode\n",
3532 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3533 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003534 if (audit_enabled) {
3535 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003536 audit_log(current->audit_context, GFP_ATOMIC,
3537 AUDIT_ANOM_PROMISCUOUS,
3538 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3539 dev->name, (dev->flags & IFF_PROMISC),
3540 (old_flags & IFF_PROMISC),
3541 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003542 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003543 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003544 }
Patrick McHardy24023452007-07-14 18:51:31 -07003545
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003546 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003547 }
Wang Chendad9b332008-06-18 01:48:28 -07003548 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003549}
3550
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551/**
3552 * dev_set_promiscuity - update promiscuity count on a device
3553 * @dev: device
3554 * @inc: modifier
3555 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003556 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 * remains above zero the interface remains promiscuous. Once it hits zero
3558 * the device reverts back to normal filtering operation. A negative inc
3559 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003560 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561 */
Wang Chendad9b332008-06-18 01:48:28 -07003562int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563{
3564 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003565 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003566
Wang Chendad9b332008-06-18 01:48:28 -07003567 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003568 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003569 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003570 if (dev->flags != old_flags)
3571 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003572 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003574EXPORT_SYMBOL(dev_set_promiscuity);
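
/*
 * Illustrative sketch (not part of the original source): capture-style users
 * bump the counter while they need to see all traffic and drop it when done,
 * so nested users compose correctly.  Called under the RTNL lock.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);      (start capturing)
 *	...
 *	dev_set_promiscuity(dev, -1);           (done)
 *	rtnl_unlock();
 */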
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575
3576/**
3577 * dev_set_allmulti - update allmulti count on a device
3578 * @dev: device
3579 * @inc: modifier
3580 *
3581 * Add or remove reception of all multicast frames to a device. While the
 3582 * count in the device remains above zero the interface remains listening
 3583 * to all multicast frames. Once it hits zero the device reverts back to normal
3584 * filtering operation. A negative @inc value is used to drop the counter
3585 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003586 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587 */
3588
Wang Chendad9b332008-06-18 01:48:28 -07003589int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590{
3591 unsigned short old_flags = dev->flags;
3592
Patrick McHardy24023452007-07-14 18:51:31 -07003593 ASSERT_RTNL();
3594
Linus Torvalds1da177e2005-04-16 15:20:36 -07003595 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003596 dev->allmulti += inc;
3597 if (dev->allmulti == 0) {
3598 /*
3599 * Avoid overflow.
3600 * If inc causes overflow, untouch allmulti and return error.
3601 */
3602 if (inc < 0)
3603 dev->flags &= ~IFF_ALLMULTI;
3604 else {
3605 dev->allmulti -= inc;
3606 printk(KERN_WARNING "%s: allmulti touches roof, "
3607 "set allmulti failed, allmulti feature of "
3608 "device might be broken.\n", dev->name);
3609 return -EOVERFLOW;
3610 }
3611 }
Patrick McHardy24023452007-07-14 18:51:31 -07003612 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003613 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003614 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003615 }
Wang Chendad9b332008-06-18 01:48:28 -07003616 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003617}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003618EXPORT_SYMBOL(dev_set_allmulti);
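
/*
 * Illustrative sketch (not part of the original source): protocols that need
 * every multicast frame (e.g. a multicast routing daemon's interface) use
 * the same counted pattern as dev_set_promiscuity().
 *
 *	err = dev_set_allmulti(dev, 1);         (while the resource is active)
 *	...
 *	dev_set_allmulti(dev, -1);              (on teardown)
 */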
Patrick McHardy4417da62007-06-27 01:28:10 -07003619
3620/*
3621 * Upload unicast and multicast address lists to device and
3622 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003623 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003624 * are present.
3625 */
3626void __dev_set_rx_mode(struct net_device *dev)
3627{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003628 const struct net_device_ops *ops = dev->netdev_ops;
3629
Patrick McHardy4417da62007-06-27 01:28:10 -07003630 /* dev_open will call this function so the list will stay sane. */
3631 if (!(dev->flags&IFF_UP))
3632 return;
3633
3634 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003635 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003636
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003637 if (ops->ndo_set_rx_mode)
3638 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003639 else {
 3640 /* Unicast address changes may only happen under the rtnl,
3641 * therefore calling __dev_set_promiscuity here is safe.
3642 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003643 if (dev->uc.count > 0 && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003644 __dev_set_promiscuity(dev, 1);
3645 dev->uc_promisc = 1;
Jiri Pirko31278e72009-06-17 01:12:19 +00003646 } else if (dev->uc.count == 0 && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003647 __dev_set_promiscuity(dev, -1);
3648 dev->uc_promisc = 0;
3649 }
3650
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003651 if (ops->ndo_set_multicast_list)
3652 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003653 }
3654}
3655
3656void dev_set_rx_mode(struct net_device *dev)
3657{
David S. Millerb9e40852008-07-15 00:15:08 -07003658 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003659 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003660 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003661}
3662
Jiri Pirkof001fde2009-05-05 02:48:28 +00003663/* hw addresses list handling functions */
3664
Jiri Pirko31278e72009-06-17 01:12:19 +00003665static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3666 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003667{
3668 struct netdev_hw_addr *ha;
3669 int alloc_size;
3670
3671 if (addr_len > MAX_ADDR_LEN)
3672 return -EINVAL;
3673
Jiri Pirko31278e72009-06-17 01:12:19 +00003674 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad22009-05-22 23:22:17 +00003675 if (!memcmp(ha->addr, addr, addr_len) &&
3676 ha->type == addr_type) {
3677 ha->refcount++;
3678 return 0;
3679 }
3680 }
3681
3682
Jiri Pirkof001fde2009-05-05 02:48:28 +00003683 alloc_size = sizeof(*ha);
3684 if (alloc_size < L1_CACHE_BYTES)
3685 alloc_size = L1_CACHE_BYTES;
3686 ha = kmalloc(alloc_size, GFP_ATOMIC);
3687 if (!ha)
3688 return -ENOMEM;
3689 memcpy(ha->addr, addr, addr_len);
3690 ha->type = addr_type;
Jiri Pirkoccffad22009-05-22 23:22:17 +00003691 ha->refcount = 1;
3692 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003693 list_add_tail_rcu(&ha->list, &list->list);
3694 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003695 return 0;
3696}
3697
3698static void ha_rcu_free(struct rcu_head *head)
3699{
3700 struct netdev_hw_addr *ha;
3701
3702 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3703 kfree(ha);
3704}
3705
Jiri Pirko31278e72009-06-17 01:12:19 +00003706static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3707 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003708{
3709 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003710
Jiri Pirko31278e72009-06-17 01:12:19 +00003711 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad22009-05-22 23:22:17 +00003712 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003713 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad22009-05-22 23:22:17 +00003714 if (--ha->refcount)
3715 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003716 list_del_rcu(&ha->list);
3717 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003718 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003719 return 0;
3720 }
3721 }
3722 return -ENOENT;
3723}
3724
Jiri Pirko31278e72009-06-17 01:12:19 +00003725static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3726 struct netdev_hw_addr_list *from_list,
3727 int addr_len,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003728 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003729{
3730 int err;
3731 struct netdev_hw_addr *ha, *ha2;
3732 unsigned char type;
3733
Jiri Pirko31278e72009-06-17 01:12:19 +00003734 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003735 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003736 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003737 if (err)
3738 goto unroll;
3739 }
3740 return 0;
3741
3742unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00003743 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003744 if (ha2 == ha)
3745 break;
3746 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003747 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003748 }
3749 return err;
3750}
3751
Jiri Pirko31278e72009-06-17 01:12:19 +00003752static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3753 struct netdev_hw_addr_list *from_list,
3754 int addr_len,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003755 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003756{
3757 struct netdev_hw_addr *ha;
3758 unsigned char type;
3759
Jiri Pirko31278e72009-06-17 01:12:19 +00003760 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003761 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003762 __hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003763 }
3764}
3765
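/*
 * __hw_addr_sync()/__hw_addr_unsync() keep a lower device's address list in
 * step with an upper device's list: a not-yet-synced entry is pushed down
 * and gains an extra reference, while an entry whose only remaining user is
 * the sync itself is removed from both lists.
 */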
Jiri Pirko31278e72009-06-17 01:12:19 +00003766static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3767 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003768 int addr_len)
3769{
3770 int err = 0;
3771 struct netdev_hw_addr *ha, *tmp;
3772
Jiri Pirko31278e72009-06-17 01:12:19 +00003773 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad22009-05-22 23:22:17 +00003774 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003775 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003776 addr_len, ha->type);
3777 if (err)
3778 break;
3779 ha->synced = true;
3780 ha->refcount++;
3781 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003782 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3783 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad22009-05-22 23:22:17 +00003784 }
3785 }
3786 return err;
3787}
3788
Jiri Pirko31278e72009-06-17 01:12:19 +00003789static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3790 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003791 int addr_len)
3792{
3793 struct netdev_hw_addr *ha, *tmp;
3794
Jiri Pirko31278e72009-06-17 01:12:19 +00003795 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad22009-05-22 23:22:17 +00003796 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003797 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003798 addr_len, ha->type);
3799 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003800 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003801 addr_len, ha->type);
3802 }
3803 }
3804}
3805
Jiri Pirko31278e72009-06-17 01:12:19 +00003806static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003807{
3808 struct netdev_hw_addr *ha, *tmp;
3809
Jiri Pirko31278e72009-06-17 01:12:19 +00003810 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003811 list_del_rcu(&ha->list);
3812 call_rcu(&ha->rcu_head, ha_rcu_free);
3813 }
Jiri Pirko31278e72009-06-17 01:12:19 +00003814 list->count = 0;
3815}
3816
3817static void __hw_addr_init(struct netdev_hw_addr_list *list)
3818{
3819 INIT_LIST_HEAD(&list->list);
3820 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003821}
3822
3823/* Device addresses handling functions */
3824
3825static void dev_addr_flush(struct net_device *dev)
3826{
3827 /* rtnl_mutex must be held here */
3828
Jiri Pirko31278e72009-06-17 01:12:19 +00003829 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003830 dev->dev_addr = NULL;
3831}
3832
3833static int dev_addr_init(struct net_device *dev)
3834{
3835 unsigned char addr[MAX_ADDR_LEN];
3836 struct netdev_hw_addr *ha;
3837 int err;
3838
3839 /* rtnl_mutex must be held here */
3840
Jiri Pirko31278e72009-06-17 01:12:19 +00003841 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00003842 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00003843 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003844 NETDEV_HW_ADDR_T_LAN);
3845 if (!err) {
3846 /*
3847 * Get the first (previously created) address from the list
3848 * and set dev_addr pointer to this location.
3849 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003850 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003851 struct netdev_hw_addr, list);
3852 dev->dev_addr = ha->addr;
3853 }
3854 return err;
3855}
3856
3857/**
3858 * dev_addr_add - Add a device address
3859 * @dev: device
3860 * @addr: address to add
3861 * @addr_type: address type
3862 *
3863 * Add a device address to the device or increase the reference count if
3864 * it already exists.
3865 *
3866 * The caller must hold the rtnl_mutex.
3867 */
3868int dev_addr_add(struct net_device *dev, unsigned char *addr,
3869 unsigned char addr_type)
3870{
3871 int err;
3872
3873 ASSERT_RTNL();
3874
Jiri Pirko31278e72009-06-17 01:12:19 +00003875 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003876 if (!err)
3877 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3878 return err;
3879}
3880EXPORT_SYMBOL(dev_addr_add);
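
/*
 * Illustrative sketch (not part of the original source): adding and later
 * dropping an extra hardware address, e.g. for a secondary unit address.
 * The example address is made up; both calls run under the RTNL lock.
 *
 *	static const unsigned char extra[ETH_ALEN] =
 *		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, (unsigned char *)extra, NETDEV_HW_ADDR_T_LAN);
 *	...
 *	dev_addr_del(dev, (unsigned char *)extra, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 */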
3881
3882/**
3883 * dev_addr_del - Release a device address.
3884 * @dev: device
3885 * @addr: address to delete
3886 * @addr_type: address type
3887 *
3888 * Release reference to a device address and remove it from the device
3889 * if the reference count drops to zero.
3890 *
3891 * The caller must hold the rtnl_mutex.
3892 */
3893int dev_addr_del(struct net_device *dev, unsigned char *addr,
3894 unsigned char addr_type)
3895{
3896 int err;
Jiri Pirkoccffad22009-05-22 23:22:17 +00003897 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003898
3899 ASSERT_RTNL();
3900
Jiri Pirkoccffad22009-05-22 23:22:17 +00003901 /*
3902 * We can not remove the first address from the list because
3903 * dev->dev_addr points to that.
3904 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003905 ha = list_first_entry(&dev->dev_addrs.list,
3906 struct netdev_hw_addr, list);
Jiri Pirkoccffad22009-05-22 23:22:17 +00003907 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3908 return -ENOENT;
3909
Jiri Pirko31278e72009-06-17 01:12:19 +00003910 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003911 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003912 if (!err)
3913 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3914 return err;
3915}
3916EXPORT_SYMBOL(dev_addr_del);
3917
3918/**
3919 * dev_addr_add_multiple - Add device addresses from another device
3920 * @to_dev: device to which addresses will be added
3921 * @from_dev: device from which addresses will be added
3922 * @addr_type: address type - 0 means type will be used from from_dev
3923 *
 3924 * Add device addresses of one device to another.
 3925 *
3926 * The caller must hold the rtnl_mutex.
3927 */
3928int dev_addr_add_multiple(struct net_device *to_dev,
3929 struct net_device *from_dev,
3930 unsigned char addr_type)
3931{
3932 int err;
3933
3934 ASSERT_RTNL();
3935
3936 if (from_dev->addr_len != to_dev->addr_len)
3937 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003938 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003939 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003940 if (!err)
3941 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3942 return err;
3943}
3944EXPORT_SYMBOL(dev_addr_add_multiple);
3945
3946/**
3947 * dev_addr_del_multiple - Delete device addresses by another device
3948 * @to_dev: device where the addresses will be deleted
 3949 * @from_dev: device supplying the list of addresses to be deleted
 3950 * @addr_type: address type - 0 means type will be used from from_dev
 3951 *
 3952 * Deletes the addresses in @to_dev that are listed in @from_dev.
3953 *
3954 * The caller must hold the rtnl_mutex.
3955 */
3956int dev_addr_del_multiple(struct net_device *to_dev,
3957 struct net_device *from_dev,
3958 unsigned char addr_type)
3959{
3960 ASSERT_RTNL();
3961
3962 if (from_dev->addr_len != to_dev->addr_len)
3963 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003964 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003965 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003966 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3967 return 0;
3968}
3969EXPORT_SYMBOL(dev_addr_del_multiple);
3970
Jiri Pirko31278e72009-06-17 01:12:19 +00003971/* multicast addresses handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00003972
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003973int __dev_addr_delete(struct dev_addr_list **list, int *count,
3974 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003975{
3976 struct dev_addr_list *da;
3977
3978 for (; (da = *list) != NULL; list = &da->next) {
3979 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3980 alen == da->da_addrlen) {
3981 if (glbl) {
3982 int old_glbl = da->da_gusers;
3983 da->da_gusers = 0;
3984 if (old_glbl == 0)
3985 break;
3986 }
3987 if (--da->da_users)
3988 return 0;
3989
3990 *list = da->next;
3991 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003992 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003993 return 0;
3994 }
3995 }
3996 return -ENOENT;
3997}
3998
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003999int __dev_addr_add(struct dev_addr_list **list, int *count,
4000 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07004001{
4002 struct dev_addr_list *da;
4003
4004 for (da = *list; da != NULL; da = da->next) {
4005 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4006 da->da_addrlen == alen) {
4007 if (glbl) {
4008 int old_glbl = da->da_gusers;
4009 da->da_gusers = 1;
4010 if (old_glbl)
4011 return 0;
4012 }
4013 da->da_users++;
4014 return 0;
4015 }
4016 }
4017
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08004018 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07004019 if (da == NULL)
4020 return -ENOMEM;
4021 memcpy(da->da_addr, addr, alen);
4022 da->da_addrlen = alen;
4023 da->da_users = 1;
4024 da->da_gusers = glbl ? 1 : 0;
4025 da->next = *list;
4026 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004027 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07004028 return 0;
4029}
4030
Patrick McHardy4417da62007-06-27 01:28:10 -07004031/**
4032 * dev_unicast_delete - Release secondary unicast address.
4033 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004034 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07004035 *
4036 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004037 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07004038 *
4039 * The caller must hold the rtnl_mutex.
4040 */
Jiri Pirkoccffad22009-05-22 23:22:17 +00004041int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07004042{
4043 int err;
4044
4045 ASSERT_RTNL();
4046
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004047 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004048 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
4049 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004050 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004051 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004052 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004053 return err;
4054}
4055EXPORT_SYMBOL(dev_unicast_delete);
4056
4057/**
4058 * dev_unicast_add - add a secondary unicast address
4059 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07004060 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07004061 *
4062 * Add a secondary unicast address to the device or increase
4063 * the reference count if it already exists.
4064 *
4065 * The caller must hold the rtnl_mutex.
4066 */
Jiri Pirkoccffad22009-05-22 23:22:17 +00004067int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07004068{
4069 int err;
4070
4071 ASSERT_RTNL();
4072
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004073 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004074 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4075 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004076 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004077 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004078 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004079 return err;
4080}
4081EXPORT_SYMBOL(dev_unicast_add);
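
/*
 * Illustrative sketch (not part of dev.c): a driver that wants to listen on
 * an extra, secondary MAC address could use dev_unicast_add() like this,
 * with dev_unicast_delete() as the mirror on teardown.  The "foo" name is
 * hypothetical; the RTNL is taken because both helpers require it.
 *
 *	static int foo_enable_listen_addr(struct net_device *dev, u8 *addr)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_unicast_add(dev, addr);
 *		rtnl_unlock();
 *		return err;
 *	}
 */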
4082
Chris Leeche83a2ea2008-01-31 16:53:23 -08004083int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4084 struct dev_addr_list **from, int *from_count)
4085{
4086 struct dev_addr_list *da, *next;
4087 int err = 0;
4088
4089 da = *from;
4090 while (da != NULL) {
4091 next = da->next;
4092 if (!da->da_synced) {
4093 err = __dev_addr_add(to, to_count,
4094 da->da_addr, da->da_addrlen, 0);
4095 if (err < 0)
4096 break;
4097 da->da_synced = 1;
4098 da->da_users++;
4099 } else if (da->da_users == 1) {
4100 __dev_addr_delete(to, to_count,
4101 da->da_addr, da->da_addrlen, 0);
4102 __dev_addr_delete(from, from_count,
4103 da->da_addr, da->da_addrlen, 0);
4104 }
4105 da = next;
4106 }
4107 return err;
4108}
Johannes Bergc4029082009-06-17 17:43:30 +02004109EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004110
4111void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4112 struct dev_addr_list **from, int *from_count)
4113{
4114 struct dev_addr_list *da, *next;
4115
4116 da = *from;
4117 while (da != NULL) {
4118 next = da->next;
4119 if (da->da_synced) {
4120 __dev_addr_delete(to, to_count,
4121 da->da_addr, da->da_addrlen, 0);
4122 da->da_synced = 0;
4123 __dev_addr_delete(from, from_count,
4124 da->da_addr, da->da_addrlen, 0);
4125 }
4126 da = next;
4127 }
4128}
Johannes Bergc4029082009-06-17 17:43:30 +02004129EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004130
4131/**
4132 * dev_unicast_sync - Synchronize device's unicast list to another device
4133 * @to: destination device
4134 * @from: source device
4135 *
4136 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004137 * addresses that have no users left. The source device must be
4138 * locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08004139 *
4140 * This function is intended to be called from the dev->set_rx_mode
4141 * function of layered software devices.
4142 */
4143int dev_unicast_sync(struct net_device *to, struct net_device *from)
4144{
4145 int err = 0;
4146
Jiri Pirkoccffad22009-05-22 23:22:17 +00004147 if (to->addr_len != from->addr_len)
4148 return -EINVAL;
4149
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004150 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004151 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004152 if (!err)
4153 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004154 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004155 return err;
4156}
4157EXPORT_SYMBOL(dev_unicast_sync);
4158
4159/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004160 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004161 * @to: destination device
4162 * @from: source device
4163 *
4164 * Remove all addresses that were added to the destination device by
4165 * dev_unicast_sync(). This function is intended to be called from the
4166 * dev->stop function of layered software devices.
4167 */
4168void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4169{
Jiri Pirkoccffad22009-05-22 23:22:17 +00004170 if (to->addr_len != from->addr_len)
4171 return;
4172
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004173 netif_addr_lock_bh(from);
4174 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004175 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004176 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004177 netif_addr_unlock(to);
4178 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004179}
4180EXPORT_SYMBOL(dev_unicast_unsync);
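
/*
 * Illustrative sketch (not part of dev.c): a layered software device (say a
 * hypothetical "foo" device stacked on priv->lowerdev) would typically call
 * dev_unicast_sync() from its ndo_set_rx_mode handler and
 * dev_unicast_unsync() from its ndo_stop handler, as the kerneldoc above
 * suggests.  struct foo_priv and its lowerdev field are assumptions.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		dev_unicast_sync(priv->lowerdev, dev);
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		dev_unicast_unsync(priv->lowerdev, dev);
 *		return 0;
 *	}
 */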
4181
Jiri Pirkoccffad22009-05-22 23:22:17 +00004182static void dev_unicast_flush(struct net_device *dev)
4183{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004184 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004185 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004186 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad22009-05-22 23:22:17 +00004187}
4188
4189static void dev_unicast_init(struct net_device *dev)
4190{
Jiri Pirko31278e72009-06-17 01:12:19 +00004191 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad22009-05-22 23:22:17 +00004192}
4193
4194
Denis Cheng12972622007-07-18 02:12:56 -07004195static void __dev_addr_discard(struct dev_addr_list **list)
4196{
4197 struct dev_addr_list *tmp;
4198
4199 while (*list != NULL) {
4200 tmp = *list;
4201 *list = tmp->next;
4202 if (tmp->da_users > tmp->da_gusers)
4203 printk(KERN_WARNING "__dev_addr_discard: address leakage! "
4204 "da_users=%d\n", tmp->da_users);
4205 kfree(tmp);
4206 }
4207}
4208
Denis Cheng26cc2522007-07-18 02:12:03 -07004209static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004210{
David S. Millerb9e40852008-07-15 00:15:08 -07004211 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004212
Denis Cheng456ad752007-07-18 02:10:54 -07004213 __dev_addr_discard(&dev->mc_list);
4214 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004215
David S. Millerb9e40852008-07-15 00:15:08 -07004216 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004217}
4218
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004219/**
4220 * dev_get_flags - get flags reported to userspace
4221 * @dev: device
4222 *
4223 * Get the combination of flag bits exported through APIs to userspace.
4224 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225unsigned dev_get_flags(const struct net_device *dev)
4226{
4227 unsigned flags;
4228
4229 flags = (dev->flags & ~(IFF_PROMISC |
4230 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004231 IFF_RUNNING |
4232 IFF_LOWER_UP |
4233 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234 (dev->gflags & (IFF_PROMISC |
4235 IFF_ALLMULTI));
4236
Stefan Rompfb00055a2006-03-20 17:09:11 -08004237 if (netif_running(dev)) {
4238 if (netif_oper_up(dev))
4239 flags |= IFF_RUNNING;
4240 if (netif_carrier_ok(dev))
4241 flags |= IFF_LOWER_UP;
4242 if (netif_dormant(dev))
4243 flags |= IFF_DORMANT;
4244 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245
4246 return flags;
4247}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004248EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004250/**
4251 * dev_change_flags - change device settings
4252 * @dev: device
4253 * @flags: device state flags
4254 *
4255 * Change settings on a device based on the given state flags. The flags are
4256 * in the userspace exported format.
4257 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258int dev_change_flags(struct net_device *dev, unsigned flags)
4259{
Thomas Graf7c355f52007-06-05 16:03:03 -07004260 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004261 int old_flags = dev->flags;
4262
Patrick McHardy24023452007-07-14 18:51:31 -07004263 ASSERT_RTNL();
4264
Linus Torvalds1da177e2005-04-16 15:20:36 -07004265 /*
4266 * Set the flags on our device.
4267 */
4268
4269 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4270 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4271 IFF_AUTOMEDIA)) |
4272 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4273 IFF_ALLMULTI));
4274
4275 /*
4276 * Load in the correct multicast list now the flags have changed.
4277 */
4278
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004279 if ((old_flags ^ flags) & IFF_MULTICAST)
4280 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004281
Patrick McHardy4417da62007-06-27 01:28:10 -07004282 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283
4284 /*
4285 * Have we downed the interface? We handle IFF_UP ourselves
4286 * according to user attempts to set it, rather than blindly
4287 * setting it.
4288 */
4289
4290 ret = 0;
4291 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4292 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4293
4294 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004295 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296 }
4297
4298 if (dev->flags & IFF_UP &&
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004299 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004301 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302
4303 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004304 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4305
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306 dev->gflags ^= IFF_PROMISC;
4307 dev_set_promiscuity(dev, inc);
4308 }
4309
4310 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4311 is important. Some (broken) drivers set IFF_PROMISC when
4312 IFF_ALLMULTI is requested, without asking us and without reporting it.
4313 */
4314 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004315 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4316
Linus Torvalds1da177e2005-04-16 15:20:36 -07004317 dev->gflags ^= IFF_ALLMULTI;
4318 dev_set_allmulti(dev, inc);
4319 }
4320
Thomas Graf7c355f52007-06-05 16:03:03 -07004321 /* Exclude state transition flags, already notified */
4322 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4323 if (changes)
4324 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325
4326 return ret;
4327}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004328EXPORT_SYMBOL(dev_change_flags);
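
/*
 * Illustrative sketch (not part of dev.c): kernel code that wants to force
 * an interface administratively up can combine dev_get_flags() and
 * dev_change_flags() under the RTNL.  The "foo" name is hypothetical and
 * error handling is kept minimal.
 *
 *	static int foo_force_up(struct net_device *dev)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *		rtnl_unlock();
 *		return err;
 *	}
 */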
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004330/**
4331 * dev_set_mtu - Change maximum transfer unit
4332 * @dev: device
4333 * @new_mtu: new transfer unit
4334 *
4335 * Change the maximum transfer size of the network device.
4336 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337int dev_set_mtu(struct net_device *dev, int new_mtu)
4338{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004339 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 int err;
4341
4342 if (new_mtu == dev->mtu)
4343 return 0;
4344
4345 /* MTU must be positive. */
4346 if (new_mtu < 0)
4347 return -EINVAL;
4348
4349 if (!netif_device_present(dev))
4350 return -ENODEV;
4351
4352 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004353 if (ops->ndo_change_mtu)
4354 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355 else
4356 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004357
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004359 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004360 return err;
4361}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004362EXPORT_SYMBOL(dev_set_mtu);
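
/*
 * Illustrative sketch (not part of dev.c): a tunnel-like driver reserving
 * room for its own header might shrink the MTU of the device it rides on.
 * FOO_HDR_LEN and the function name are hypothetical; dev_set_mtu() is
 * normally called with the RTNL held, as done by dev_ifsioc() below.
 *
 *	static int foo_shrink_mtu(struct net_device *lower_dev)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_set_mtu(lower_dev, lower_dev->mtu - FOO_HDR_LEN);
 *		rtnl_unlock();
 *		return err;
 *	}
 */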
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004364/**
4365 * dev_set_mac_address - Change Media Access Control Address
4366 * @dev: device
4367 * @sa: new address
4368 *
4369 * Change the hardware (MAC) address of the device.
4370 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4372{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004373 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374 int err;
4375
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004376 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377 return -EOPNOTSUPP;
4378 if (sa->sa_family != dev->type)
4379 return -EINVAL;
4380 if (!netif_device_present(dev))
4381 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004382 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004383 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004384 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004385 return err;
4386}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004387EXPORT_SYMBOL(dev_set_mac_address);
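
/*
 * Illustrative sketch (not part of dev.c): programming a new hardware
 * address from kernel code.  The sockaddr family must match dev->type and
 * new_mac is assumed to hold dev->addr_len bytes; the "foo" name is
 * hypothetical.
 *
 *	static int foo_set_mac(struct net_device *dev, const u8 *new_mac)
 *	{
 *		struct sockaddr sa;
 *		int err;
 *
 *		sa.sa_family = dev->type;
 *		memcpy(sa.sa_data, new_mac, dev->addr_len);
 *
 *		rtnl_lock();
 *		err = dev_set_mac_address(dev, &sa);
 *		rtnl_unlock();
 *		return err;
 *	}
 */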
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388
4389/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00004390 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004391 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004392static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393{
4394 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00004395 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004396
4397 if (!dev)
4398 return -ENODEV;
4399
4400 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004401 case SIOCGIFFLAGS: /* Get interface flags */
4402 ifr->ifr_flags = (short) dev_get_flags(dev);
4403 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004405 case SIOCGIFMETRIC: /* Get the metric on the interface
4406 (currently unused) */
4407 ifr->ifr_metric = 0;
4408 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004410 case SIOCGIFMTU: /* Get the MTU of a device */
4411 ifr->ifr_mtu = dev->mtu;
4412 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004413
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004414 case SIOCGIFHWADDR:
4415 if (!dev->addr_len)
4416 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4417 else
4418 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4419 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4420 ifr->ifr_hwaddr.sa_family = dev->type;
4421 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004423 case SIOCGIFSLAVE:
4424 err = -EINVAL;
4425 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004426
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004427 case SIOCGIFMAP:
4428 ifr->ifr_map.mem_start = dev->mem_start;
4429 ifr->ifr_map.mem_end = dev->mem_end;
4430 ifr->ifr_map.base_addr = dev->base_addr;
4431 ifr->ifr_map.irq = dev->irq;
4432 ifr->ifr_map.dma = dev->dma;
4433 ifr->ifr_map.port = dev->if_port;
4434 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004435
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004436 case SIOCGIFINDEX:
4437 ifr->ifr_ifindex = dev->ifindex;
4438 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004439
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004440 case SIOCGIFTXQLEN:
4441 ifr->ifr_qlen = dev->tx_queue_len;
4442 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004443
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004444 default:
4445 /* dev_ioctl() should ensure this case
4446 * is never reached
4447 */
4448 WARN_ON(1);
4449 err = -EINVAL;
4450 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004451
4452 }
4453 return err;
4454}
4455
4456/*
4457 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4458 */
4459static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4460{
4461 int err;
4462 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004463 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004464
4465 if (!dev)
4466 return -ENODEV;
4467
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004468 ops = dev->netdev_ops;
4469
Jeff Garzik14e3e072007-10-08 00:06:32 -07004470 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004471 case SIOCSIFFLAGS: /* Set interface flags */
4472 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004473
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004474 case SIOCSIFMETRIC: /* Set the metric on the interface
4475 (currently unused) */
4476 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004477
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004478 case SIOCSIFMTU: /* Set the MTU of a device */
4479 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004480
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004481 case SIOCSIFHWADDR:
4482 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004484 case SIOCSIFHWBROADCAST:
4485 if (ifr->ifr_hwaddr.sa_family != dev->type)
4486 return -EINVAL;
4487 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4488 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4489 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4490 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004492 case SIOCSIFMAP:
4493 if (ops->ndo_set_config) {
4494 if (!netif_device_present(dev))
4495 return -ENODEV;
4496 return ops->ndo_set_config(dev, &ifr->ifr_map);
4497 }
4498 return -EOPNOTSUPP;
4499
4500 case SIOCADDMULTI:
4501 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4502 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4503 return -EINVAL;
4504 if (!netif_device_present(dev))
4505 return -ENODEV;
4506 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4507 dev->addr_len, 1);
4508
4509 case SIOCDELMULTI:
4510 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4511 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4512 return -EINVAL;
4513 if (!netif_device_present(dev))
4514 return -ENODEV;
4515 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4516 dev->addr_len, 1);
4517
4518 case SIOCSIFTXQLEN:
4519 if (ifr->ifr_qlen < 0)
4520 return -EINVAL;
4521 dev->tx_queue_len = ifr->ifr_qlen;
4522 return 0;
4523
4524 case SIOCSIFNAME:
4525 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4526 return dev_change_name(dev, ifr->ifr_newname);
4527
4528 /*
4529 * Unknown or private ioctl
4530 */
4531 default:
4532 if ((cmd >= SIOCDEVPRIVATE &&
4533 cmd <= SIOCDEVPRIVATE + 15) ||
4534 cmd == SIOCBONDENSLAVE ||
4535 cmd == SIOCBONDRELEASE ||
4536 cmd == SIOCBONDSETHWADDR ||
4537 cmd == SIOCBONDSLAVEINFOQUERY ||
4538 cmd == SIOCBONDINFOQUERY ||
4539 cmd == SIOCBONDCHANGEACTIVE ||
4540 cmd == SIOCGMIIPHY ||
4541 cmd == SIOCGMIIREG ||
4542 cmd == SIOCSMIIREG ||
4543 cmd == SIOCBRADDIF ||
4544 cmd == SIOCBRDELIF ||
4545 cmd == SIOCSHWTSTAMP ||
4546 cmd == SIOCWANDEV) {
4547 err = -EOPNOTSUPP;
4548 if (ops->ndo_do_ioctl) {
4549 if (netif_device_present(dev))
4550 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4551 else
4552 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004553 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004554 } else
4555 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556
4557 }
4558 return err;
4559}
4560
4561/*
4562 * This function handles all "interface"-type I/O control requests. The actual
4563 * 'doing' part of this is dev_ifsioc above.
4564 */
4565
4566/**
4567 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004568 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569 * @cmd: command to issue
4570 * @arg: pointer to a struct ifreq in user space
4571 *
4572 * Issue ioctl functions to devices. This is normally called by the
4573 * user space syscall interfaces but can sometimes be useful for
4574 * other purposes. The return value is the return from the syscall if
4575 * positive or a negative errno code on error.
4576 */
4577
Eric W. Biederman881d9662007-09-17 11:56:21 -07004578int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004579{
4580 struct ifreq ifr;
4581 int ret;
4582 char *colon;
4583
4584 /* One special case: SIOCGIFCONF takes ifconf argument
4585 and requires shared lock, because it sleeps writing
4586 to user space.
4587 */
4588
4589 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004590 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004591 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004592 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004593 return ret;
4594 }
4595 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004596 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597
4598 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4599 return -EFAULT;
4600
4601 ifr.ifr_name[IFNAMSIZ-1] = 0;
4602
4603 colon = strchr(ifr.ifr_name, ':');
4604 if (colon)
4605 *colon = 0;
4606
4607 /*
4608 * See which interface the caller is talking about.
4609 */
4610
4611 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004612 /*
4613 * These ioctl calls:
4614 * - can be done by all.
4615 * - atomic and do not require locking.
4616 * - return a value
4617 */
4618 case SIOCGIFFLAGS:
4619 case SIOCGIFMETRIC:
4620 case SIOCGIFMTU:
4621 case SIOCGIFHWADDR:
4622 case SIOCGIFSLAVE:
4623 case SIOCGIFMAP:
4624 case SIOCGIFINDEX:
4625 case SIOCGIFTXQLEN:
4626 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004627 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004628 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004629 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004630 if (!ret) {
4631 if (colon)
4632 *colon = ':';
4633 if (copy_to_user(arg, &ifr,
4634 sizeof(struct ifreq)))
4635 ret = -EFAULT;
4636 }
4637 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004639 case SIOCETHTOOL:
4640 dev_load(net, ifr.ifr_name);
4641 rtnl_lock();
4642 ret = dev_ethtool(net, &ifr);
4643 rtnl_unlock();
4644 if (!ret) {
4645 if (colon)
4646 *colon = ':';
4647 if (copy_to_user(arg, &ifr,
4648 sizeof(struct ifreq)))
4649 ret = -EFAULT;
4650 }
4651 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004653 /*
4654 * These ioctl calls:
4655 * - require superuser power.
4656 * - require strict serialization.
4657 * - return a value
4658 */
4659 case SIOCGMIIPHY:
4660 case SIOCGMIIREG:
4661 case SIOCSIFNAME:
4662 if (!capable(CAP_NET_ADMIN))
4663 return -EPERM;
4664 dev_load(net, ifr.ifr_name);
4665 rtnl_lock();
4666 ret = dev_ifsioc(net, &ifr, cmd);
4667 rtnl_unlock();
4668 if (!ret) {
4669 if (colon)
4670 *colon = ':';
4671 if (copy_to_user(arg, &ifr,
4672 sizeof(struct ifreq)))
4673 ret = -EFAULT;
4674 }
4675 return ret;
4676
4677 /*
4678 * These ioctl calls:
4679 * - require superuser power.
4680 * - require strict serialization.
4681 * - do not return a value
4682 */
4683 case SIOCSIFFLAGS:
4684 case SIOCSIFMETRIC:
4685 case SIOCSIFMTU:
4686 case SIOCSIFMAP:
4687 case SIOCSIFHWADDR:
4688 case SIOCSIFSLAVE:
4689 case SIOCADDMULTI:
4690 case SIOCDELMULTI:
4691 case SIOCSIFHWBROADCAST:
4692 case SIOCSIFTXQLEN:
4693 case SIOCSMIIREG:
4694 case SIOCBONDENSLAVE:
4695 case SIOCBONDRELEASE:
4696 case SIOCBONDSETHWADDR:
4697 case SIOCBONDCHANGEACTIVE:
4698 case SIOCBRADDIF:
4699 case SIOCBRDELIF:
4700 case SIOCSHWTSTAMP:
4701 if (!capable(CAP_NET_ADMIN))
4702 return -EPERM;
4703 /* fall through */
4704 case SIOCBONDSLAVEINFOQUERY:
4705 case SIOCBONDINFOQUERY:
4706 dev_load(net, ifr.ifr_name);
4707 rtnl_lock();
4708 ret = dev_ifsioc(net, &ifr, cmd);
4709 rtnl_unlock();
4710 return ret;
4711
4712 case SIOCGIFMEM:
4713 /* Get the per device memory space. We can add this but
4714 * currently do not support it */
4715 case SIOCSIFMEM:
4716 /* Set the per device memory buffer space.
4717 * Not applicable in our case */
4718 case SIOCSIFLINK:
4719 return -EINVAL;
4720
4721 /*
4722 * Unknown or private ioctl.
4723 */
4724 default:
4725 if (cmd == SIOCWANDEV ||
4726 (cmd >= SIOCDEVPRIVATE &&
4727 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004728 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004730 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004731 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004732 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004733 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004734 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004735 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004736 }
4737 /* Take care of Wireless Extensions */
4738 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4739 return wext_handle_ioctl(net, &ifr, cmd, arg);
4740 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741 }
4742}
4743
4744
4745/**
4746 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004747 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748 *
4749 * Returns a suitable unique value for a new device interface
4750 * number. The caller must hold the rtnl semaphore or the
4751 * dev_base_lock to be sure it remains unique.
4752 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004753static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754{
4755 static int ifindex;
4756 for (;;) {
4757 if (++ifindex <= 0)
4758 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004759 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004760 return ifindex;
4761 }
4762}
4763
Linus Torvalds1da177e2005-04-16 15:20:36 -07004764/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004765static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004766
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004767static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004768{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004769 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004770}
4771
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004772static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004773{
Krishna Kumare93737b2009-12-08 22:26:02 +00004774 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004775
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004776 BUG_ON(dev_boot_phase);
4777 ASSERT_RTNL();
4778
Krishna Kumare93737b2009-12-08 22:26:02 +00004779 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004780 /* Some devices call without registering
Krishna Kumare93737b2009-12-08 22:26:02 +00004781 * for initialization unwind. Remove those
4782 * devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004783 */
4784 if (dev->reg_state == NETREG_UNINITIALIZED) {
4785 pr_debug("unregister_netdevice: device %s/%p never "
4786 "was registered\n", dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004787
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004788 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00004789 list_del(&dev->unreg_list);
4790 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004791 }
4792
4793 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4794
4795 /* If device is running, close it first. */
4796 dev_close(dev);
4797
4798 /* And unlink it from device chain. */
4799 unlist_netdevice(dev);
4800
4801 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004802 }
4803
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004804 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004805
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004806 list_for_each_entry(dev, head, unreg_list) {
4807 /* Shutdown queueing discipline. */
4808 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004809
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004810
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004811 /* Notify protocols, that we are about to destroy
4812 this device. They should clean all the things.
4813 */
4814 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4815
4816 /*
4817 * Flush the unicast and multicast chains
4818 */
4819 dev_unicast_flush(dev);
4820 dev_addr_discard(dev);
4821
4822 if (dev->netdev_ops->ndo_uninit)
4823 dev->netdev_ops->ndo_uninit(dev);
4824
4825 /* Notifier chain MUST detach us from master device. */
4826 WARN_ON(dev->master);
4827
4828 /* Remove entries from kobject tree */
4829 netdev_unregister_kobject(dev);
4830 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004831
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004832 /* Process any work delayed until the end of the batch */
4833 dev = list_entry(head->next, struct net_device, unreg_list);
4834 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4835
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004836 synchronize_net();
4837
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004838 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004839 dev_put(dev);
4840}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004841
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004842static void rollback_registered(struct net_device *dev)
4843{
4844 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004845
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004846 list_add(&dev->unreg_list, &single);
4847 rollback_registered_many(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004848}
4849
David S. Millere8a04642008-07-17 00:34:19 -07004850static void __netdev_init_queue_locks_one(struct net_device *dev,
4851 struct netdev_queue *dev_queue,
4852 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004853{
4854 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004855 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004856 dev_queue->xmit_lock_owner = -1;
4857}
4858
4859static void netdev_init_queue_locks(struct net_device *dev)
4860{
David S. Millere8a04642008-07-17 00:34:19 -07004861 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4862 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004863}
4864
Herbert Xub63365a2008-10-23 01:11:29 -07004865unsigned long netdev_fix_features(unsigned long features, const char *name)
4866{
4867 /* Fix illegal SG+CSUM combinations. */
4868 if ((features & NETIF_F_SG) &&
4869 !(features & NETIF_F_ALL_CSUM)) {
4870 if (name)
4871 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4872 "checksum feature.\n", name);
4873 features &= ~NETIF_F_SG;
4874 }
4875
4876 /* TSO requires that SG is present as well. */
4877 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4878 if (name)
4879 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4880 "SG feature.\n", name);
4881 features &= ~NETIF_F_TSO;
4882 }
4883
4884 if (features & NETIF_F_UFO) {
4885 if (!(features & NETIF_F_GEN_CSUM)) {
4886 if (name)
4887 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4888 "since no NETIF_F_HW_CSUM feature.\n",
4889 name);
4890 features &= ~NETIF_F_UFO;
4891 }
4892
4893 if (!(features & NETIF_F_SG)) {
4894 if (name)
4895 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4896 "since no NETIF_F_SG feature.\n", name);
4897 features &= ~NETIF_F_UFO;
4898 }
4899 }
4900
4901 return features;
4902}
4903EXPORT_SYMBOL(netdev_fix_features);
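
/*
 * Illustrative sketch (not part of dev.c): a driver computing a wanted
 * feature mask can run it through netdev_fix_features() so that illegal
 * combinations (for example SG without any checksum offload) are dropped
 * with a console notice.  The wanted mask shown here is only an example.
 *
 *	unsigned long wanted = dev->features | NETIF_F_SG | NETIF_F_TSO;
 *
 *	dev->features = netdev_fix_features(wanted, dev->name);
 */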
4904
Linus Torvalds1da177e2005-04-16 15:20:36 -07004905/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08004906 * netif_stacked_transfer_operstate - transfer operstate
4907 * @rootdev: the root or lower level device to transfer state from
4908 * @dev: the device to transfer operstate to
4909 *
4910 * Transfer operational state from root to device. This is normally
4911 * called when a stacking relationship exists between the root
4912 * device and the device (a leaf device).
4913 */
4914void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4915 struct net_device *dev)
4916{
4917 if (rootdev->operstate == IF_OPER_DORMANT)
4918 netif_dormant_on(dev);
4919 else
4920 netif_dormant_off(dev);
4921
4922 if (netif_carrier_ok(rootdev)) {
4923 if (!netif_carrier_ok(dev))
4924 netif_carrier_on(dev);
4925 } else {
4926 if (netif_carrier_ok(dev))
4927 netif_carrier_off(dev);
4928 }
4929}
4930EXPORT_SYMBOL(netif_stacked_transfer_operstate);
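
/*
 * Illustrative sketch (not part of dev.c): a stacked "foo" device mirroring
 * carrier and dormant state from the lower device it rides on, typically
 * from a netdevice notifier.  foo_find_upper() is a hypothetical lookup of
 * the upper device that is stacked on the lower one.
 *
 *	static int foo_device_event(struct notifier_block *unused,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *lower = ptr;
 *		struct net_device *upper = foo_find_upper(lower);
 *
 *		if (upper && event == NETDEV_CHANGE)
 *			netif_stacked_transfer_operstate(lower, upper);
 *		return NOTIFY_DONE;
 *	}
 */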
4931
4932/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004933 * register_netdevice - register a network device
4934 * @dev: device to register
4935 *
4936 * Take a completed network device structure and add it to the kernel
4937 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4938 * chain. 0 is returned on success. A negative errno code is returned
4939 * on a failure to set up the device, or if the name is a duplicate.
4940 *
4941 * Callers must hold the rtnl semaphore. You may want
4942 * register_netdev() instead of this.
4943 *
4944 * BUGS:
4945 * The locking appears insufficient to guarantee two parallel registers
4946 * will not get the same name.
4947 */
4948
4949int register_netdevice(struct net_device *dev)
4950{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004951 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004952 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953
4954 BUG_ON(dev_boot_phase);
4955 ASSERT_RTNL();
4956
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004957 might_sleep();
4958
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959 /* When net_device's are persistent, this will be fatal. */
4960 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004961 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962
David S. Millerf1f28aa2008-07-15 00:08:33 -07004963 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004964 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004965 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004966
Linus Torvalds1da177e2005-04-16 15:20:36 -07004967 dev->iflink = -1;
4968
4969 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004970 if (dev->netdev_ops->ndo_init) {
4971 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972 if (ret) {
4973 if (ret > 0)
4974 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004975 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004976 }
4977 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004978
Octavian Purdilad9031022009-11-18 02:36:59 +00004979 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
4980 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004981 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004982
Eric W. Biederman881d9662007-09-17 11:56:21 -07004983 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984 if (dev->iflink == -1)
4985 dev->iflink = dev->ifindex;
4986
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004987 /* Fix illegal checksum combinations */
4988 if ((dev->features & NETIF_F_HW_CSUM) &&
4989 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4990 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4991 dev->name);
4992 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4993 }
4994
4995 if ((dev->features & NETIF_F_NO_CSUM) &&
4996 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4997 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4998 dev->name);
4999 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5000 }
5001
Herbert Xub63365a2008-10-23 01:11:29 -07005002 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07005004 /* Enable software GSO if SG is supported. */
5005 if (dev->features & NETIF_F_SG)
5006 dev->features |= NETIF_F_GSO;
5007
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005008 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005009
5010 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5011 ret = notifier_to_errno(ret);
5012 if (ret)
5013 goto err_uninit;
5014
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005015 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005016 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005017 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005018 dev->reg_state = NETREG_REGISTERED;
5019
Linus Torvalds1da177e2005-04-16 15:20:36 -07005020 /*
5021 * Default initial state at registry is that the
5022 * device is present.
5023 */
5024
5025 set_bit(__LINK_STATE_PRESENT, &dev->state);
5026
Linus Torvalds1da177e2005-04-16 15:20:36 -07005027 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005029 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005030
5031 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005032 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005033 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005034 if (ret) {
5035 rollback_registered(dev);
5036 dev->reg_state = NETREG_UNREGISTERED;
5037 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005038 /*
5039 * Prevent userspace races by waiting until the network
5040 * device is fully setup before sending notifications.
5041 */
5042 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043
5044out:
5045 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005046
5047err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005048 if (dev->netdev_ops->ndo_uninit)
5049 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005050 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005051}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005052EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005053
5054/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005055 * init_dummy_netdev - init a dummy network device for NAPI
5056 * @dev: device to init
5057 *
5058 * This takes a network device structure and initializes the minimum
5059 * number of fields so it can be used to schedule NAPI polls without
5060 * registering a full blown interface. This is to be used by drivers
5061 * that need to tie several hardware interfaces to a single NAPI
5062 * poll scheduler due to HW limitations.
5063 */
5064int init_dummy_netdev(struct net_device *dev)
5065{
5066 /* Clear everything. Note we don't initialize spinlocks
5067 * as they aren't supposed to be taken by any of the
5068 * NAPI code and this dummy netdev is supposed to be
5069 * only ever used for NAPI polls
5070 */
5071 memset(dev, 0, sizeof(struct net_device));
5072
5073 /* make sure we BUG if trying to hit standard
5074 * register/unregister code path
5075 */
5076 dev->reg_state = NETREG_DUMMY;
5077
5078 /* initialize the ref count */
5079 atomic_set(&dev->refcnt, 1);
5080
5081 /* NAPI wants this */
5082 INIT_LIST_HEAD(&dev->napi_list);
5083
5084 /* a dummy interface is started by default */
5085 set_bit(__LINK_STATE_PRESENT, &dev->state);
5086 set_bit(__LINK_STATE_START, &dev->state);
5087
5088 return 0;
5089}
5090EXPORT_SYMBOL_GPL(init_dummy_netdev);
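
/*
 * Illustrative sketch (not part of dev.c): a driver with one real interface
 * but several receive channels can host extra NAPI contexts on an embedded
 * dummy netdev.  struct foo_adapter, foo_poll() and the weight of 64 are
 * assumptions made for the example.
 *
 *	struct foo_adapter {
 *		struct net_device dummy_dev;
 *		struct napi_struct napi;
 *	};
 *
 *	static void foo_setup_napi(struct foo_adapter *adapter)
 *	{
 *		init_dummy_netdev(&adapter->dummy_dev);
 *		netif_napi_add(&adapter->dummy_dev, &adapter->napi,
 *			       foo_poll, 64);
 *	}
 */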
5091
5092
5093/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005094 * register_netdev - register a network device
5095 * @dev: device to register
5096 *
5097 * Take a completed network device structure and add it to the kernel
5098 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5099 * chain. 0 is returned on success. A negative errno code is returned
5100 * on a failure to set up the device, or if the name is a duplicate.
5101 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005102 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005103 * and expands the device name if you passed a format string to
5104 * alloc_netdev.
5105 */
5106int register_netdev(struct net_device *dev)
5107{
5108 int err;
5109
5110 rtnl_lock();
5111
5112 /*
5113 * If the name is a format string the caller wants us to do a
5114 * name allocation.
5115 */
5116 if (strchr(dev->name, '%')) {
5117 err = dev_alloc_name(dev, dev->name);
5118 if (err < 0)
5119 goto out;
5120 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005121
Linus Torvalds1da177e2005-04-16 15:20:36 -07005122 err = register_netdevice(dev);
5123out:
5124 rtnl_unlock();
5125 return err;
5126}
5127EXPORT_SYMBOL(register_netdev);
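
/*
 * Illustrative sketch (not part of dev.c): the usual driver probe sequence
 * built from the allocation and registration helpers in this file.  The
 * "foo" names, foo_setup() and the single tx queue are assumptions.
 *
 *	static int foo_probe(void)
 *	{
 *		struct net_device *dev;
 *		int err;
 *
 *		dev = alloc_netdev_mq(sizeof(struct foo_priv), "foo%d",
 *				      foo_setup, 1);
 *		if (!dev)
 *			return -ENOMEM;
 *
 *		err = register_netdev(dev);
 *		if (err) {
 *			free_netdev(dev);
 *			return err;
 *		}
 *		return 0;
 *	}
 */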
5128
5129/*
5130 * netdev_wait_allrefs - wait until all references are gone.
5131 *
5132 * This is called when unregistering network devices.
5133 *
5134 * Any protocol or device that holds a reference should register
5135 * for netdevice notification, and cleanup and put back the
5136 * reference if they receive an UNREGISTER event.
5137 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005138 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139 */
5140static void netdev_wait_allrefs(struct net_device *dev)
5141{
5142 unsigned long rebroadcast_time, warning_time;
5143
Eric Dumazete014deb2009-11-17 05:59:21 +00005144 linkwatch_forget_dev(dev);
5145
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146 rebroadcast_time = warning_time = jiffies;
5147 while (atomic_read(&dev->refcnt) != 0) {
5148 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005149 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150
5151 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005152 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005153 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
Octavian Purdila395264d2009-11-16 13:49:35 +00005154 * should have already handled it the first time */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005155
5156 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5157 &dev->state)) {
5158 /* We must not have linkwatch events
5159 * pending on unregister. If this
5160 * happens, we simply run the queue
5161 * unscheduled, resulting in a noop
5162 * for this device.
5163 */
5164 linkwatch_run_queue();
5165 }
5166
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005167 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168
5169 rebroadcast_time = jiffies;
5170 }
5171
5172 msleep(250);
5173
5174 if (time_after(jiffies, warning_time + 10 * HZ)) {
5175 printk(KERN_EMERG "unregister_netdevice: "
5176 "waiting for %s to become free. Usage "
5177 "count = %d\n",
5178 dev->name, atomic_read(&dev->refcnt));
5179 warning_time = jiffies;
5180 }
5181 }
5182}
5183
5184/* The sequence is:
5185 *
5186 * rtnl_lock();
5187 * ...
5188 * register_netdevice(x1);
5189 * register_netdevice(x2);
5190 * ...
5191 * unregister_netdevice(y1);
5192 * unregister_netdevice(y2);
5193 * ...
5194 * rtnl_unlock();
5195 * free_netdev(y1);
5196 * free_netdev(y2);
5197 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005198 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005200 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005201 * without deadlocking with linkwatch via keventd.
5202 * 2) Since we run with the RTNL semaphore not held, we can sleep
5203 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005204 *
5205 * We must not return until all unregister events added during
5206 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208void netdev_run_todo(void)
5209{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005210 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005213 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005214
5215 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005216
Linus Torvalds1da177e2005-04-16 15:20:36 -07005217 while (!list_empty(&list)) {
5218 struct net_device *dev
5219 = list_entry(list.next, struct net_device, todo_list);
5220 list_del(&dev->todo_list);
5221
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005222 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223 printk(KERN_ERR "network todo '%s' but state %d\n",
5224 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005225 dump_stack();
5226 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005227 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005228
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005229 dev->reg_state = NETREG_UNREGISTERED;
5230
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005231 on_each_cpu(flush_backlog, dev, 1);
5232
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005233 netdev_wait_allrefs(dev);
5234
5235 /* paranoia */
5236 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005237 WARN_ON(dev->ip_ptr);
5238 WARN_ON(dev->ip6_ptr);
5239 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005240
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005241 if (dev->destructor)
5242 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005243
5244 /* Free network device */
5245 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005246 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005247}
5248
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005249/**
Eric Dumazetd83345a2009-11-16 03:36:51 +00005250 * dev_txq_stats_fold - fold tx_queues stats
5251 * @dev: device to get statistics from
5252 * @stats: struct net_device_stats to hold results
5253 */
5254void dev_txq_stats_fold(const struct net_device *dev,
5255 struct net_device_stats *stats)
5256{
5257 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5258 unsigned int i;
5259 struct netdev_queue *txq;
5260
5261 for (i = 0; i < dev->num_tx_queues; i++) {
5262 txq = netdev_get_tx_queue(dev, i);
5263 tx_bytes += txq->tx_bytes;
5264 tx_packets += txq->tx_packets;
5265 tx_dropped += txq->tx_dropped;
5266 }
5267 if (tx_bytes || tx_packets || tx_dropped) {
5268 stats->tx_bytes = tx_bytes;
5269 stats->tx_packets = tx_packets;
5270 stats->tx_dropped = tx_dropped;
5271 }
5272}
5273EXPORT_SYMBOL(dev_txq_stats_fold);
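
/*
 * Illustrative sketch (not part of dev.c): a multiqueue driver that counts
 * tx traffic in the per-queue netdev_queue counters can let the core fold
 * them into the legacy stats block from its ndo_get_stats implementation
 * (foo_get_stats is hypothetical):
 *
 *	static struct net_device_stats *foo_get_stats(struct net_device *dev)
 *	{
 *		dev_txq_stats_fold(dev, &dev->stats);
 *		return &dev->stats;
 *	}
 */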
5274
5275/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005276 * dev_get_stats - get network device statistics
5277 * @dev: device to get statistics from
5278 *
5279 * Get network statistics from device. The device driver may provide
5280 * its own method by setting dev->netdev_ops->get_stats; otherwise
5281 * the internal statistics structure is used.
5282 */
5283const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005284{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005285 const struct net_device_ops *ops = dev->netdev_ops;
5286
5287 if (ops->ndo_get_stats)
5288 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005289
Eric Dumazetd83345a2009-11-16 03:36:51 +00005290 dev_txq_stats_fold(dev, &dev->stats);
5291 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07005292}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005293EXPORT_SYMBOL(dev_get_stats);
Rusty Russellc45d2862007-03-28 14:29:08 -07005294
David S. Millerdc2b4842008-07-08 17:18:23 -07005295static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005296 struct netdev_queue *queue,
5297 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005298{
David S. Millerdc2b4842008-07-08 17:18:23 -07005299 queue->dev = dev;
5300}
5301
David S. Millerbb949fb2008-07-08 16:55:56 -07005302static void netdev_init_queues(struct net_device *dev)
5303{
David S. Millere8a04642008-07-17 00:34:19 -07005304 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5305 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005306 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005307}
5308
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005310 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005311 * @sizeof_priv: size of private data to allocate space for
5312 * @name: device name format string
5313 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005314 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315 *
5316 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005317 * and performs basic initialization. Also allocates subqueue structs
5318 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005320struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5321 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005322{
David S. Millere8a04642008-07-17 00:34:19 -07005323 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005324 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005325 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005326 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005327
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005328 BUG_ON(strlen(name) >= sizeof(dev->name));
5329
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005330 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005331 if (sizeof_priv) {
5332 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005333 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005334 alloc_size += sizeof_priv;
5335 }
5336 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005337 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005338
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005339 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005340 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005341 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342 return NULL;
5343 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005344
Stephen Hemminger79439862008-07-21 13:28:44 -07005345 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005346 if (!tx) {
5347 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5348 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005349 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005350 }
5351
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005352 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005353 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005354
5355 if (dev_addr_init(dev))
5356 goto free_tx;
5357
Jiri Pirkoccffad22009-05-22 23:22:17 +00005358 dev_unicast_init(dev);
5359
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005360 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005361
David S. Millere8a04642008-07-17 00:34:19 -07005362 dev->_tx = tx;
5363 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005364 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005365
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005366 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005367
David S. Millerbb949fb2008-07-08 16:55:56 -07005368 netdev_init_queues(dev);
5369
Herbert Xud565b0a2008-12-15 23:38:52 -08005370 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005371 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00005372 INIT_LIST_HEAD(&dev->link_watch_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005373 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005374 setup(dev);
5375 strcpy(dev->name, name);
5376 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005377
5378free_tx:
5379 kfree(tx);
5380
5381free_p:
5382 kfree(p);
5383 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005384}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005385EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005386
5387/**
5388 * free_netdev - free network device
5389 * @dev: device
5390 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005391 * This function does the last stage of destroying an allocated device
5392 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005393 * If this is the last reference then it will be freed.
5394 */
5395void free_netdev(struct net_device *dev)
5396{
Herbert Xud565b0a2008-12-15 23:38:52 -08005397 struct napi_struct *p, *n;
5398
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005399 release_net(dev_net(dev));
5400
David S. Millere8a04642008-07-17 00:34:19 -07005401 kfree(dev->_tx);
5402
Jiri Pirkof001fde2009-05-05 02:48:28 +00005403 /* Flush device addresses */
5404 dev_addr_flush(dev);
5405
Herbert Xud565b0a2008-12-15 23:38:52 -08005406 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5407 netif_napi_del(p);
5408
Stephen Hemminger3041a062006-05-26 13:25:24 -07005409 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005410 if (dev->reg_state == NETREG_UNINITIALIZED) {
5411 kfree((char *)dev - dev->padded);
5412 return;
5413 }
5414
5415 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5416 dev->reg_state = NETREG_RELEASED;
5417
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005418 /* will free via device release */
5419 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005420}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005421EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005422
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005423/**
5424 * synchronize_net - Synchronize with packet receive processing
5425 *
5426 * Wait for packets currently being received to be done.
5427 * Does not block later packets from starting.
5428 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005429void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005430{
5431 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005432 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005433}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005434EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435
5436/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005437 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005438 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005439 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08005440 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005441 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005442 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005443 * If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005444 *
5445 * Callers must hold the rtnl semaphore. You may want
5446 * unregister_netdev() instead of this.
5447 */
5448
Eric Dumazet44a08732009-10-27 07:03:04 +00005449void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005450{
Herbert Xua6620712007-12-12 19:21:56 -08005451 ASSERT_RTNL();
5452
Eric Dumazet44a08732009-10-27 07:03:04 +00005453 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005454 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005455 } else {
5456 rollback_registered(dev);
5457 /* Finish processing unregister after unlock */
5458 net_set_todo(dev);
5459 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005460}
Eric Dumazet44a08732009-10-27 07:03:04 +00005461EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005462
5463/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005464 * unregister_netdevice_many - unregister many devices
5465 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005466 */
5467void unregister_netdevice_many(struct list_head *head)
5468{
5469 struct net_device *dev;
5470
5471 if (!list_empty(head)) {
5472 rollback_registered_many(head);
5473 list_for_each_entry(dev, head, unreg_list)
5474 net_set_todo(dev);
5475 }
5476}
Eric Dumazet63c80992009-10-27 07:06:49 +00005477EXPORT_SYMBOL(unregister_netdevice_many);
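
/*
 * Sketch of the batching pattern unregister_netdevice_queue() and
 * unregister_netdevice_many() enable (illustration only; the devs[]
 * array is hypothetical). Queueing several devices on one list and
 * flushing them with a single unregister_netdevice_many() call shares
 * the expensive notifier and RCU synchronization across the whole
 * batch instead of paying it once per device:
 */
static void my_destroy_all(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	/* one rollback/synchronize pass for every queued device */
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();		/* runs the todo list, waits for references */

	for (i = 0; i < n; i++)
		free_netdev(devs[i]);
}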
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005478
5479/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005480 * unregister_netdev - remove device from the kernel
5481 * @dev: device
5482 *
5483 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005484 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005485 *
5486 * This is just a wrapper for unregister_netdevice that takes
5487 * the rtnl semaphore. In general you want to use this and not
5488 * unregister_netdevice.
5489 */
5490void unregister_netdev(struct net_device *dev)
5491{
5492 rtnl_lock();
5493 unregister_netdevice(dev);
5494 rtnl_unlock();
5495}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005496EXPORT_SYMBOL(unregister_netdev);
5497
Eric W. Biedermance286d32007-09-12 13:53:49 +02005498/**
5499 * dev_change_net_namespace - move device to a different network namespace
5500 * @dev: device
5501 * @net: network namespace
5502 * @pat: If not NULL name pattern to try if the current device name
5503 * is already taken in the destination network namespace.
5504 *
5505 * This function shuts down a device interface and moves it
5506 * to a new network namespace. On success 0 is returned, on
5507 * failure a negative errno code is returned.
5508 *
5509 * Callers must hold the rtnl semaphore.
5510 */
5511
5512int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5513{
Eric W. Biedermance286d32007-09-12 13:53:49 +02005514 int err;
5515
5516 ASSERT_RTNL();
5517
5518 /* Don't allow namespace local devices to be moved. */
5519 err = -EINVAL;
5520 if (dev->features & NETIF_F_NETNS_LOCAL)
5521 goto out;
5522
Eric W. Biederman38918452008-10-27 17:51:47 -07005523#ifdef CONFIG_SYSFS
5524 /* Don't allow real devices to be moved when sysfs
5525 * is enabled.
5526 */
5527 err = -EINVAL;
5528 if (dev->dev.parent)
5529 goto out;
5530#endif
5531
Eric W. Biedermance286d32007-09-12 13:53:49 +02005532 /* Ensure the device has been registered */
5533 err = -EINVAL;
5534 if (dev->reg_state != NETREG_REGISTERED)
5535 goto out;
5536
5537 /* Get out if there is nothing to do */
5538 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005539 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005540 goto out;
5541
5542 /* Pick the destination device name, and ensure
5543 * we can use it in the destination network namespace.
5544 */
5545 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00005546 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005547 /* We get here if we can't use the current device name */
5548 if (!pat)
5549 goto out;
Octavian Purdilad9031022009-11-18 02:36:59 +00005550 if (dev_get_valid_name(net, pat, dev->name, 1))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005551 goto out;
5552 }
5553
5554 /*
5555 * And now a mini version of register_netdevice and unregister_netdevice.
5556 */
5557
5558 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005559 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005560
5561 /* And unlink it from device chain */
5562 err = -ENODEV;
5563 unlist_netdevice(dev);
5564
5565 synchronize_net();
5566
5567 /* Shutdown queueing discipline. */
5568 dev_shutdown(dev);
5569
5570 /* Notify protocols that we are about to destroy
5571    this device. They should clean up all of their state.
5572 */
5573 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005574 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005575
5576 /*
5577 * Flush the unicast and multicast chains
5578 */
Jiri Pirkoccffad22009-05-22 23:22:17 +00005579 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005580 dev_addr_discard(dev);
5581
Eric W. Biederman38918452008-10-27 17:51:47 -07005582 netdev_unregister_kobject(dev);
5583
Eric W. Biedermance286d32007-09-12 13:53:49 +02005584 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005585 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005586
Eric W. Biedermance286d32007-09-12 13:53:49 +02005587 /* If there is an ifindex conflict assign a new one */
5588 if (__dev_get_by_index(net, dev->ifindex)) {
5589 int iflink = (dev->iflink == dev->ifindex);
5590 dev->ifindex = dev_new_index(net);
5591 if (iflink)
5592 dev->iflink = dev->ifindex;
5593 }
5594
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005595 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005596 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005597 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005598
5599 /* Add the device back in the hashes */
5600 list_netdevice(dev);
5601
5602 /* Notify protocols that a new device appeared. */
5603 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5604
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005605 /*
5606 * Prevent userspace races by waiting until the network
5607 * device is fully set up before sending notifications.
5608 */
5609 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5610
Eric W. Biedermance286d32007-09-12 13:53:49 +02005611 synchronize_net();
5612 err = 0;
5613out:
5614 return err;
5615}
Johannes Berg463d0182009-07-14 00:33:35 +02005616EXPORT_SYMBOL_GPL(dev_change_net_namespace);
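
/*
 * Minimal usage sketch for dev_change_net_namespace() (illustration
 * only). The caller holds the rtnl semaphore and passes a fallback name
 * pattern in case the current name is already taken in the target
 * namespace; default_device_exit() below uses the same call to push
 * devices back into init_net.
 */
static int my_move_to_init_net(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, &init_net, "dev%d");
	rtnl_unlock();
	return err;
}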
Eric W. Biedermance286d32007-09-12 13:53:49 +02005617
Linus Torvalds1da177e2005-04-16 15:20:36 -07005618static int dev_cpu_callback(struct notifier_block *nfb,
5619 unsigned long action,
5620 void *ocpu)
5621{
5622 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005623 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005624 struct sk_buff *skb;
5625 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5626 struct softnet_data *sd, *oldsd;
5627
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005628 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005629 return NOTIFY_OK;
5630
5631 local_irq_disable();
5632 cpu = smp_processor_id();
5633 sd = &per_cpu(softnet_data, cpu);
5634 oldsd = &per_cpu(softnet_data, oldcpu);
5635
5636 /* Find end of our completion_queue. */
5637 list_skb = &sd->completion_queue;
5638 while (*list_skb)
5639 list_skb = &(*list_skb)->next;
5640 /* Append completion queue from offline CPU. */
5641 *list_skb = oldsd->completion_queue;
5642 oldsd->completion_queue = NULL;
5643
5644 /* Find end of our output_queue. */
5645 list_net = &sd->output_queue;
5646 while (*list_net)
5647 list_net = &(*list_net)->next_sched;
5648 /* Append output queue from offline CPU. */
5649 *list_net = oldsd->output_queue;
5650 oldsd->output_queue = NULL;
5651
5652 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5653 local_irq_enable();
5654
5655 /* Process offline CPU's input_pkt_queue */
5656 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5657 netif_rx(skb);
5658
5659 return NOTIFY_OK;
5660}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005661
5662
Herbert Xu7f353bf2007-08-10 15:47:58 -07005663/**
Herbert Xub63365a2008-10-23 01:11:29 -07005664 * netdev_increment_features - increment feature set by one
5665 * @all: current feature set
5666 * @one: new feature set
5667 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005668 *
5669 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005670 * @one to the master device with current feature set @all. Will not
5671 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005672 */
Herbert Xub63365a2008-10-23 01:11:29 -07005673unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5674 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005675{
Herbert Xub63365a2008-10-23 01:11:29 -07005676 /* If device needs checksumming, downgrade to it. */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005677 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
Herbert Xub63365a2008-10-23 01:11:29 -07005678 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5679 else if (mask & NETIF_F_ALL_CSUM) {
5680 /* If one device supports v4/v6 checksumming, set for all. */
5681 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5682 !(all & NETIF_F_GEN_CSUM)) {
5683 all &= ~NETIF_F_ALL_CSUM;
5684 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5685 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005686
Herbert Xub63365a2008-10-23 01:11:29 -07005687 /* If one device supports hw checksumming, set for all. */
5688 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5689 all &= ~NETIF_F_ALL_CSUM;
5690 all |= NETIF_F_HW_CSUM;
5691 }
5692 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005693
Herbert Xub63365a2008-10-23 01:11:29 -07005694 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005695
Herbert Xub63365a2008-10-23 01:11:29 -07005696 one |= all & NETIF_F_ONE_FOR_ALL;
Sridhar Samudralad9f59502009-10-07 12:24:25 +00005697 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
Herbert Xub63365a2008-10-23 01:11:29 -07005698 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005699
5700 return all;
5701}
Herbert Xub63365a2008-10-23 01:11:29 -07005702EXPORT_SYMBOL(netdev_increment_features);
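
/*
 * Sketch of how a master device (bridge/bonding style) would fold its
 * slaves' features with netdev_increment_features() (illustration only;
 * struct my_master and its slave list are hypothetical). The
 * NETIF_F_ONE_FOR_ALL bits are cleared from the seed and added back by
 * the helper as soon as one slave provides them, while the checksum
 * bits are downgraded to what every slave can handle:
 */
struct my_slave {
	struct net_device *dev;
	struct list_head list;
};

struct my_master {
	struct net_device *dev;
	unsigned long feature_mask;
	struct list_head slave_list;
};

static unsigned long my_master_compute_features(struct my_master *m)
{
	struct my_slave *s;
	unsigned long features = m->feature_mask & ~NETIF_F_ONE_FOR_ALL;

	list_for_each_entry(s, &m->slave_list, list)
		features = netdev_increment_features(features,
						     s->dev->features,
						     m->feature_mask);
	return features;
}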
Herbert Xu7f353bf2007-08-10 15:47:58 -07005703
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005704static struct hlist_head *netdev_create_hash(void)
5705{
5706 int i;
5707 struct hlist_head *hash;
5708
5709 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5710 if (hash != NULL)
5711 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5712 INIT_HLIST_HEAD(&hash[i]);
5713
5714 return hash;
5715}
5716
Eric W. Biederman881d9662007-09-17 11:56:21 -07005717/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005718static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005719{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005720 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005721
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005722 net->dev_name_head = netdev_create_hash();
5723 if (net->dev_name_head == NULL)
5724 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005725
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005726 net->dev_index_head = netdev_create_hash();
5727 if (net->dev_index_head == NULL)
5728 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005729
5730 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005731
5732err_idx:
5733 kfree(net->dev_name_head);
5734err_name:
5735 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005736}
5737
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005738/**
5739 * netdev_drivername - network driver for the device
5740 * @dev: network device
5741 * @buffer: buffer for resulting name
5742 * @len: size of buffer
5743 *
5744 * Determine network driver for device.
5745 */
Stephen Hemmingercf04a4c2008-09-30 02:22:14 -07005746char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005747{
Stephen Hemmingercf04a4c2008-09-30 02:22:14 -07005748 const struct device_driver *driver;
5749 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005750
5751 if (len <= 0 || !buffer)
5752 return buffer;
5753 buffer[0] = 0;
5754
5755 parent = dev->dev.parent;
5756
5757 if (!parent)
5758 return buffer;
5759
5760 driver = parent->driver;
5761 if (driver && driver->name)
5762 strlcpy(buffer, driver->name, len);
5763 return buffer;
5764}
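
/*
 * Usage sketch for netdev_drivername() (illustration only): the watchdog
 * style of reporting, where the driver name is resolved into a small
 * on-stack buffer purely for a diagnostic message.
 */
static void my_report_timeout(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_WARNING "%s (%s): transmit timed out\n",
	       dev->name, netdev_drivername(dev, drivername, 64));
}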
5765
Pavel Emelyanov46650792007-10-08 20:38:39 -07005766static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005767{
5768 kfree(net->dev_name_head);
5769 kfree(net->dev_index_head);
5770}
5771
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005772static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005773 .init = netdev_init,
5774 .exit = netdev_exit,
5775};
5776
Pavel Emelyanov46650792007-10-08 20:38:39 -07005777static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005778{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005779 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005780 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005781 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02005782 * initial network namespace
5783 */
5784 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005785 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005786 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005787 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005788
5789 /* Ignore unmovable devices (e.g. loopback) */
5790 if (dev->features & NETIF_F_NETNS_LOCAL)
5791 continue;
5792
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005793 /* Leave virtual devices for the generic cleanup */
5794 if (dev->rtnl_link_ops)
5795 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005796
Eric W. Biedermance286d32007-09-12 13:53:49 +02005797 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005798 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5799 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005800 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005801 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005802 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005803 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005804 }
5805 }
5806 rtnl_unlock();
5807}
5808
Eric W. Biederman04dc7f62009-12-03 02:29:04 +00005809static void __net_exit default_device_exit_batch(struct list_head *net_list)
5810{
5811 /* At exit all network devices must be removed from a network
5812 * namespace. Do this in the reverse order of registration.
5813 * Do this across as many network namespaces as possible to
5814 * improve batching efficiency.
5815 */
5816 struct net_device *dev;
5817 struct net *net;
5818 LIST_HEAD(dev_kill_list);
5819
5820 rtnl_lock();
5821 list_for_each_entry(net, net_list, exit_list) {
5822 for_each_netdev_reverse(net, dev) {
5823 if (dev->rtnl_link_ops)
5824 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
5825 else
5826 unregister_netdevice_queue(dev, &dev_kill_list);
5827 }
5828 }
5829 unregister_netdevice_many(&dev_kill_list);
5830 rtnl_unlock();
5831}
5832
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005833static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005834 .exit = default_device_exit,
Eric W. Biederman04dc7f62009-12-03 02:29:04 +00005835 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02005836};
5837
Linus Torvalds1da177e2005-04-16 15:20:36 -07005838/*
5839 * Initialize the DEV module. At boot time this walks the device list and
5840 * unhooks any devices that fail to initialise (normally hardware not
5841 * present) and leaves us with a valid list of present and active devices.
5842 *
5843 */
5844
5845/*
5846 * This is called single threaded during boot, so no need
5847 * to take the rtnl semaphore.
5848 */
5849static int __init net_dev_init(void)
5850{
5851 int i, rc = -ENOMEM;
5852
5853 BUG_ON(!dev_boot_phase);
5854
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855 if (dev_proc_init())
5856 goto out;
5857
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005858 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005859 goto out;
5860
5861 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08005862 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005863 INIT_LIST_HEAD(&ptype_base[i]);
5864
Eric W. Biederman881d9662007-09-17 11:56:21 -07005865 if (register_pernet_subsys(&netdev_net_ops))
5866 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005867
5868 /*
5869 * Initialise the packet receive queues.
5870 */
5871
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005872 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005873 struct softnet_data *queue;
5874
5875 queue = &per_cpu(softnet_data, i);
5876 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005877 queue->completion_queue = NULL;
5878 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005879
5880 queue->backlog.poll = process_backlog;
5881 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005882 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005883 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005884 }
5885
Linus Torvalds1da177e2005-04-16 15:20:36 -07005886 dev_boot_phase = 0;
5887
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005888 /* The loopback device is special: if any other network device
5889 * is present in a network namespace, the loopback device must
5890 * be present too. Since we now dynamically allocate and free the
5891 * loopback device, ensure this invariant is maintained by
5892 * keeping the loopback device as the first device on the
5893 * list of network devices. This ensures the loopback device
5894 * is the first device that appears and the last network device
5895 * that disappears.
5896 */
5897 if (register_pernet_device(&loopback_net_ops))
5898 goto out;
5899
5900 if (register_pernet_device(&default_device_ops))
5901 goto out;
5902
Carlos R. Mafra962cf362008-05-15 11:15:37 -03005903 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5904 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005905
5906 hotcpu_notifier(dev_cpu_callback, 0);
5907 dst_init();
5908 dev_mcast_init();
5909 rc = 0;
5910out:
5911 return rc;
5912}
5913
5914subsys_initcall(net_dev_init);
5915
Krishna Kumare88721f2009-02-18 17:55:02 -08005916static int __init initialize_hashrnd(void)
5917{
5918 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5919 return 0;
5920}
5921
5922late_initcall_sync(initialize_hashrnd);
5923