/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

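/*
 * A minimal usage sketch (illustrative, not used by this file): a pure
 * reader walking the device list under RCU, per the locking rules
 * above. The helper name example_count_up_devices is hypothetical.
 *
 *	static int example_count_up_devices(struct net *net)
 *	{
 *		struct net_device *dev;
 *		int up = 0;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			if (dev->flags & IFF_UP)
 *				up++;
 *		rcu_read_unlock();
 *		return up;
 *	}
 */
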
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	is first on the list, it cannot sense that the packet is cloned
 *	and should be copied-on-write, so it will change it and
 *	subsequent readers will get a broken packet.
 *	--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

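/*
 * A minimal usage sketch (illustrative, not used by this file): a tap
 * on a made-up ethertype. The names example_rcv and example_pt and the
 * 0x88b5 type value are assumptions for this sketch; a handler owns the
 * skb it is given and must free or consume it.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_pt __read_mostly = {
 *		.type	= cpu_to_be16(0x88b5),
 *		.func	= example_rcv,
 *	};
 *
 *	dev_add_pack(&example_pt);
 *	...
 *	dev_remove_pack(&example_pt);
 */
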
/*******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

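/*
 * Boot-line example (format inferred from the parser above, the values
 * are made up): a command line argument such as
 *
 *	netdev=9,0x300,0xd0000,0xd8000,eth0
 *
 * saves irq 9, I/O base 0x300 and the given memory window for "eth0";
 * netdev_boot_setup_check() applies them when the device is probed.
 */
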
/*******************************************************************************

		    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

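/*
 * The typical lockless lookup pattern (an illustrative sketch, not used
 * by this file): a pointer returned by dev_get_by_name_rcu() is only
 * valid inside the read-side critical section unless a reference is
 * taken with dev_hold(), which is exactly what dev_get_by_name() above
 * does.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		ifindex = dev->ifindex;
 *	rcu_read_unlock();
 */
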
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

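/*
 * A minimal usage sketch (illustrative, not used by this file): looking
 * up an Ethernet device by a made-up MAC address under the rtnl
 * semaphore, pinning it before the lock is dropped.
 *
 *	unsigned char mac[ETH_ALEN] = {0x00, 0x16, 0x3e, 0x00, 0x00, 0x01};
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = dev_getbyhwaddr(&init_net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		dev_hold(dev);
 *	rtnl_unlock();
 */
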
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

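/*
 * A minimal usage sketch (illustrative, not used by this file): a
 * driver asking for the next free "foo%d" name before registering its
 * device. The "foo%d" template and the error label are made up.
 *
 *	err = dev_alloc_name(dev, "foo%d");
 *	if (err < 0)
 *		goto out_free;
 */
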
static int dev_get_valid_name(struct net *net, const char *name, char *buf,
			      bool fmt)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return __dev_alloc_name(net, name, buf);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (buf != name)
		strlcpy(buf, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, newname, dev->name, 1);
	if (err < 0)
		return err;

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net_eq(net, &init_net)) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from @alias
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	/* Don't overwrite dev->ifalias before krealloc() has succeeded,
	 * or a failed resize would leak the old buffer.
	 */
	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len + 1);
	return len;
}

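/*
 * A minimal usage sketch (illustrative, not used by this file): setting
 * a human-readable alias, as rtnetlink does for IFLA_IFALIAS. The alias
 * string is made up; the rtnl semaphore must be held.
 *
 *	rtnl_lock();
 *	dev_set_alias(dev, "uplink-to-core1", strlen("uplink-to-core1"));
 *	rtnl_unlock();
 */
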

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 *	Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

1321/**
1322 * register_netdevice_notifier - register a network notifier block
1323 * @nb: notifier
1324 *
1325 * Register a notifier to be called when network device events occur.
1326 * The notifier passed is linked into the kernel structures and must
1327 * not be reused until it has been unregistered. A negative errno code
1328 * is returned on a failure.
1329 *
1330 * When registered, all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001331 * to the new notifier to allow the caller to have a race-free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 * view of the network device list.
1333 */
1334
1335int register_netdevice_notifier(struct notifier_block *nb)
1336{
1337 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001338 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001339 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 int err;
1341
1342 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001343 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001344 if (err)
1345 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001346 if (dev_boot_phase)
1347 goto unlock;
1348 for_each_net(net) {
1349 for_each_netdev(net, dev) {
1350 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1351 err = notifier_to_errno(err);
1352 if (err)
1353 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354
Eric W. Biederman881d9662007-09-17 11:56:21 -07001355 if (!(dev->flags & IFF_UP))
1356 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001357
Eric W. Biederman881d9662007-09-17 11:56:21 -07001358 nb->notifier_call(nb, NETDEV_UP, dev);
1359 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001361
1362unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363 rtnl_unlock();
1364 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001365
1366rollback:
1367 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001368 for_each_net(net) {
1369 for_each_netdev(net, dev) {
1370 if (dev == last)
1371 break;
Herbert Xufcc5a032007-07-30 17:03:38 -07001372
Eric W. Biederman881d9662007-09-17 11:56:21 -07001373 if (dev->flags & IFF_UP) {
1374 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1375 nb->notifier_call(nb, NETDEV_DOWN, dev);
1376 }
1377 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00001378 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001379 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001380 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001381
1382 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001383 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001385EXPORT_SYMBOL(register_netdevice_notifier);
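/*
 * Illustrative sketch: a minimal notifier. In this scheme the opaque
 * pointer handed to the callback is the struct net_device itself;
 * "my_netdev_event" and "my_nb" are hypothetical names:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&my_nb);
 */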
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
1387/**
1388 * unregister_netdevice_notifier - unregister a network notifier block
1389 * @nb: notifier
1390 *
1391 * Unregister a notifier previously registered by
1392 * register_netdevice_notifier(). The notifier is unlinked from the
1393 * kernel structures and may then be reused. A negative errno code
1394 * is returned on a failure.
1395 */
1396
1397int unregister_netdevice_notifier(struct notifier_block *nb)
1398{
Herbert Xu9f514952006-03-25 01:24:25 -08001399 int err;
1400
1401 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001402 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001403 rtnl_unlock();
1404 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001406EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407
1408/**
1409 * call_netdevice_notifiers - call all network notifier blocks
1410 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001411 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 *
1413 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001414 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 */
1416
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001417int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001419 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420}
1421
1422/* When > 0 there are consumers of rx skb time stamps */
1423static atomic_t netstamp_needed = ATOMIC_INIT(0);
1424
1425void net_enable_timestamp(void)
1426{
1427 atomic_inc(&netstamp_needed);
1428}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001429EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430
1431void net_disable_timestamp(void)
1432{
1433 atomic_dec(&netstamp_needed);
1434}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001435EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001437static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438{
1439 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001440 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001441 else
1442 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443}
1444
Arnd Bergmann44540962009-11-26 06:07:08 +00001445/**
1446 * dev_forward_skb - loopback an skb to another netif
1447 *
1448 * @dev: destination network device
1449 * @skb: buffer to forward
1450 *
1451 * return values:
1452 * NET_RX_SUCCESS (no congestion)
1453 * NET_RX_DROP (packet was dropped)
1454 *
1455 * dev_forward_skb can be used for injecting an skb from the
1456 * start_xmit function of one device into the receive queue
1457 * of another device.
1458 *
1459 * The receiving device may be in another namespace, so
1460 * we have to clear all information in the skb that could
1461 * impact namespace isolation.
1462 */
1463int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1464{
1465 skb_orphan(skb);
1466
1467 if (!(dev->flags & IFF_UP))
1468 return NET_RX_DROP;
1469
1470 if (skb->len > (dev->mtu + dev->hard_header_len))
1471 return NET_RX_DROP;
1472
Arnd Bergmann8a83a002010-01-30 12:23:03 +00001473 skb_set_dev(skb, dev);
Arnd Bergmann44540962009-11-26 06:07:08 +00001474 skb->tstamp.tv64 = 0;
1475 skb->pkt_type = PACKET_HOST;
1476 skb->protocol = eth_type_trans(skb, dev);
Arnd Bergmann44540962009-11-26 06:07:08 +00001477 return netif_rx(skb);
1478}
1479EXPORT_SYMBOL_GPL(dev_forward_skb);
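/*
 * Illustrative sketch: a veth-like driver injects frames into its peer
 * device from its ndo_start_xmit handler. "get_peer" is a hypothetical
 * helper, not a real API:
 *
 *	static netdev_tx_t veth_like_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct net_device *peer = get_peer(dev);
 *
 *		dev_forward_skb(peer, skb);
 *		return NETDEV_TX_OK;
 *	}
 */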
1480
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481/*
1482 * Support routine. Sends outgoing frames to any network
1483 * taps currently in use.
1484 */
1485
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001486static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487{
1488 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001489
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001490#ifdef CONFIG_NET_CLS_ACT
1491 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1492 net_timestamp(skb);
1493#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001494 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001495#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496
1497 rcu_read_lock();
1498 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1499 /* Never send packets back to the socket
1500 * they originated from - MvS (miquels@drinkel.ow.org)
1501 */
1502 if ((ptype->dev == dev || !ptype->dev) &&
1503 (ptype->af_packet_priv == NULL ||
1504 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001505 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 if (!skb2)
1507 break;
1508
1509 /* skb->network_header should be correctly
1510 * set by the sender, so that the second statement is
1511 * just protection against buggy protocols.
1512 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001513 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001515 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001516 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 if (net_ratelimit())
1518 printk(KERN_CRIT "protocol %04x is "
1519 "buggy, dev %s\n",
1520 ntohs(skb2->protocol), dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001521 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 }
1523
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001524 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001526 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 }
1528 }
1529 rcu_read_unlock();
1530}
1531
Denis Vlasenko56079432006-03-29 15:57:29 -08001532
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001533static inline void __netif_reschedule(struct Qdisc *q)
1534{
1535 struct softnet_data *sd;
1536 unsigned long flags;
1537
1538 local_irq_save(flags);
1539 sd = &__get_cpu_var(softnet_data);
1540 q->next_sched = sd->output_queue;
1541 sd->output_queue = q;
1542 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1543 local_irq_restore(flags);
1544}
1545
David S. Miller37437bb2008-07-16 02:15:04 -07001546void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001547{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001548 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1549 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001550}
1551EXPORT_SYMBOL(__netif_schedule);
1552
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001553void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001554{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001555 if (atomic_dec_and_test(&skb->users)) {
1556 struct softnet_data *sd;
1557 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001558
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001559 local_irq_save(flags);
1560 sd = &__get_cpu_var(softnet_data);
1561 skb->next = sd->completion_queue;
1562 sd->completion_queue = skb;
1563 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1564 local_irq_restore(flags);
1565 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001566}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001567EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001568
1569void dev_kfree_skb_any(struct sk_buff *skb)
1570{
1571 if (in_irq() || irqs_disabled())
1572 dev_kfree_skb_irq(skb);
1573 else
1574 dev_kfree_skb(skb);
1575}
1576EXPORT_SYMBOL(dev_kfree_skb_any);
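/*
 * Illustrative sketch: a TX-completion interrupt handler frees
 * transmitted buffers with dev_kfree_skb_irq(); paths that may run in
 * either context use dev_kfree_skb_any(). "struct my_priv" is a
 * hypothetical driver-private structure:
 *
 *	static irqreturn_t my_tx_done_irq(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		dev_kfree_skb_irq(priv->last_tx_skb);
 *		priv->last_tx_skb = NULL;
 *		return IRQ_HANDLED;
 *	}
 */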
1577
1578
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001579/**
1580 * netif_device_detach - mark device as removed
1581 * @dev: network device
1582 *
1583 * Mark the device as removed from the system and therefore no longer available.
1584 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001585void netif_device_detach(struct net_device *dev)
1586{
1587 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1588 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001589 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001590 }
1591}
1592EXPORT_SYMBOL(netif_device_detach);
1593
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001594/**
1595 * netif_device_attach - mark device as attached
1596 * @dev: network device
1597 *
1598 * Mark the device as attached to the system and restart it if needed.
1599 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001600void netif_device_attach(struct net_device *dev)
1601{
1602 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1603 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001604 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001605 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001606 }
1607}
1608EXPORT_SYMBOL(netif_device_attach);
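/*
 * Illustrative sketch: PCI drivers typically bracket suspend/resume
 * with these helpers so the stack stops handing them packets while the
 * hardware is powered down:
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */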
1609
Ben Hutchings6de329e2008-06-16 17:02:28 -07001610static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1611{
1612 return ((features & NETIF_F_GEN_CSUM) ||
1613 ((features & NETIF_F_IP_CSUM) &&
1614 protocol == htons(ETH_P_IP)) ||
1615 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001616 protocol == htons(ETH_P_IPV6)) ||
1617 ((features & NETIF_F_FCOE_CRC) &&
1618 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001619}
1620
1621static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1622{
1623 if (can_checksum_protocol(dev->features, skb->protocol))
1624 return true;
1625
1626 if (skb->protocol == htons(ETH_P_8021Q)) {
1627 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1628 if (can_checksum_protocol(dev->features & dev->vlan_features,
1629 veh->h_vlan_encapsulated_proto))
1630 return true;
1631 }
1632
1633 return false;
1634}
Denis Vlasenko56079432006-03-29 15:57:29 -08001635
Arnd Bergmann8a83a002010-01-30 12:23:03 +00001636/**
1637 * skb_set_dev - assign a new device to a buffer
1638 * @skb: buffer for the new device
1639 * @dev: network device
1640 *
1641 * If an skb is owned by a device already, we have to reset
1642 * all data private to the namespace a device belongs to
1643 * before assigning it a new device.
1644 */
1645#ifdef CONFIG_NET_NS
1646void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1647{
1648 skb_dst_drop(skb);
1649 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1650 secpath_reset(skb);
1651 nf_reset(skb);
1652 skb_init_secmark(skb);
1653 skb->mark = 0;
1654 skb->priority = 0;
1655 skb->nf_trace = 0;
1656 skb->ipvs_property = 0;
1657#ifdef CONFIG_NET_SCHED
1658 skb->tc_index = 0;
1659#endif
1660 }
1661 skb->dev = dev;
1662}
1663EXPORT_SYMBOL(skb_set_dev);
1664#endif /* CONFIG_NET_NS */
1665
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666/*
1667 * Invalidate hardware checksum when packet is to be mangled, and
1668 * complete checksum manually on outgoing path.
1669 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001670int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671{
Al Virod3bc23e2006-11-14 21:24:49 -08001672 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001673 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
Patrick McHardy84fa7932006-08-29 16:44:56 -07001675 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001676 goto out_set_summed;
1677
1678 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001679 /* Let GSO fix up the checksum. */
1680 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 }
1682
Herbert Xua0308472007-10-15 01:47:15 -07001683 offset = skb->csum_start - skb_headroom(skb);
1684 BUG_ON(offset >= skb_headlen(skb));
1685 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1686
1687 offset += skb->csum_offset;
1688 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1689
1690 if (skb_cloned(skb) &&
1691 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1693 if (ret)
1694 goto out;
1695 }
1696
Herbert Xua0308472007-10-15 01:47:15 -07001697 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001698out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001700out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 return ret;
1702}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001703EXPORT_SYMBOL(skb_checksum_help);
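/*
 * Illustrative sketch: a driver whose hardware cannot checksum a given
 * frame falls back to software before transmitting ("hw_can_csum" is a
 * hypothetical capability test):
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !hw_can_csum(skb) && skb_checksum_help(skb))
 *		goto drop;
 */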
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001705/**
1706 * skb_gso_segment - Perform segmentation on skb.
1707 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001708 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001709 *
1710 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001711 *
1712 * It may return NULL if the skb requires no segmentation. This is
1713 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001714 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001715struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001716{
1717 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1718 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001719 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001720 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001721
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001722 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001723 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001724 __skb_pull(skb, skb->mac_len);
1725
Herbert Xu67fd1a72009-01-19 16:26:44 -08001726 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1727 struct net_device *dev = skb->dev;
1728 struct ethtool_drvinfo info = {};
1729
1730 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1731 dev->ethtool_ops->get_drvinfo(dev, &info);
1732
1733 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1734 "ip_summed=%d",
1735 info.driver, dev ? dev->features : 0L,
1736 skb->sk ? skb->sk->sk_route_caps : 0L,
1737 skb->len, skb->data_len, skb->ip_summed);
1738
Herbert Xua430a432006-07-08 13:34:56 -07001739 if (skb_header_cloned(skb) &&
1740 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1741 return ERR_PTR(err);
1742 }
1743
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001744 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001745 list_for_each_entry_rcu(ptype,
1746 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001747 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001748 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001749 err = ptype->gso_send_check(skb);
1750 segs = ERR_PTR(err);
1751 if (err || skb_gso_ok(skb, features))
1752 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001753 __skb_push(skb, (skb->data -
1754 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001755 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001756 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001757 break;
1758 }
1759 }
1760 rcu_read_unlock();
1761
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001762 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001763
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001764 return segs;
1765}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001766EXPORT_SYMBOL(skb_gso_segment);
1767
Herbert Xufb286bb2005-11-10 13:01:24 -08001768/* Take action when hardware reception checksum errors are detected. */
1769#ifdef CONFIG_BUG
1770void netdev_rx_csum_fault(struct net_device *dev)
1771{
1772 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001773 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001774 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001775 dump_stack();
1776 }
1777}
1778EXPORT_SYMBOL(netdev_rx_csum_fault);
1779#endif
1780
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781/* Actually, we should eliminate this check as soon as we know that:
1782 * 1. An IOMMU is present and allows all the memory to be mapped.
1783 * 2. No high memory really exists on this machine.
1784 */
1785
1786static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1787{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001788#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 int i;
1790
1791 if (dev->features & NETIF_F_HIGHDMA)
1792 return 0;
1793
1794 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1795 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1796 return 1;
1797
Herbert Xu3d3a8532006-06-27 13:33:10 -07001798#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 return 0;
1800}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001802struct dev_gso_cb {
1803 void (*destructor)(struct sk_buff *skb);
1804};
1805
1806#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1807
1808static void dev_gso_skb_destructor(struct sk_buff *skb)
1809{
1810 struct dev_gso_cb *cb;
1811
1812 do {
1813 struct sk_buff *nskb = skb->next;
1814
1815 skb->next = nskb->next;
1816 nskb->next = NULL;
1817 kfree_skb(nskb);
1818 } while (skb->next);
1819
1820 cb = DEV_GSO_CB(skb);
1821 if (cb->destructor)
1822 cb->destructor(skb);
1823}
1824
1825/**
1826 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1827 * @skb: buffer to segment
1828 *
1829 * This function segments the given skb and stores the list of segments
1830 * in skb->next.
1831 */
1832static int dev_gso_segment(struct sk_buff *skb)
1833{
1834 struct net_device *dev = skb->dev;
1835 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001836 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1837 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001838
Herbert Xu576a30e2006-06-27 13:22:38 -07001839 segs = skb_gso_segment(skb, features);
1840
1841 /* Verifying header integrity only. */
1842 if (!segs)
1843 return 0;
1844
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001845 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001846 return PTR_ERR(segs);
1847
1848 skb->next = segs;
1849 DEV_GSO_CB(skb)->destructor = skb->destructor;
1850 skb->destructor = dev_gso_skb_destructor;
1851
1852 return 0;
1853}
1854
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001855int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1856 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001857{
Stephen Hemminger00829822008-11-20 20:14:53 -08001858 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00001859 int rc = NETDEV_TX_OK;
Stephen Hemminger00829822008-11-20 20:14:53 -08001860
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001861 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001862 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001863 dev_queue_xmit_nit(skb, dev);
1864
Herbert Xu576a30e2006-06-27 13:22:38 -07001865 if (netif_needs_gso(dev, skb)) {
1866 if (unlikely(dev_gso_segment(skb)))
1867 goto out_kfree_skb;
1868 if (skb->next)
1869 goto gso;
1870 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001871
Eric Dumazet93f154b2009-05-18 22:19:19 -07001872 /*
1873 * If the device doesn't need skb->dst, release it right now while
1874 * it's hot in this CPU's cache.
1875 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001876 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1877 skb_dst_drop(skb);
1878
Patrick Ohlyac45f602009-02-12 05:03:37 +00001879 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001880 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001881 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001882 /*
1883 * TODO: if skb_orphan() was called by
1884 * dev->hard_start_xmit() (for example, the unmodified
1885 * igb driver does that; bnx2 doesn't), then
1886 * skb_tx_software_timestamp() will be unable to send
1887 * back the time stamp.
1888 *
1889 * How can this be prevented? Always create another
1890 * reference to the socket before calling
1891 * dev->hard_start_xmit()? Prevent that skb_orphan()
1892 * does anything in dev->hard_start_xmit() by clearing
1893 * the skb destructor before the call and restoring it
1894 * afterwards, then doing the skb_orphan() ourselves?
1895 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001896 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001897 }
1898
Herbert Xu576a30e2006-06-27 13:22:38 -07001899gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001900 do {
1901 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001902
1903 skb->next = nskb->next;
1904 nskb->next = NULL;
Krishna Kumar068a2de2009-12-09 20:59:58 +00001905
1906 /*
1907 * If the device doesn't need nskb->dst, release it right now while
1908 * it's hot in this CPU's cache.
1909 */
1910 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1911 skb_dst_drop(nskb);
1912
Stephen Hemminger00829822008-11-20 20:14:53 -08001913 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001914 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00001915 if (rc & ~NETDEV_TX_MASK)
1916 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07001917 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001918 skb->next = nskb;
1919 return rc;
1920 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001921 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001922 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001923 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001924 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001925
Patrick McHardy572a9d72009-11-10 06:14:14 +00001926out_kfree_gso_skb:
1927 if (likely(skb->next == NULL))
1928 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001929out_kfree_skb:
1930 kfree_skb(skb);
Patrick McHardy572a9d72009-11-10 06:14:14 +00001931 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001932}
1933
David S. Miller70192982009-01-27 16:34:47 -08001934static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001935
Stephen Hemminger92477442009-03-21 13:39:26 -07001936u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001937{
David S. Miller70192982009-01-27 16:34:47 -08001938 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001939
David S. Miller513de112009-05-03 14:43:10 -07001940 if (skb_rx_queue_recorded(skb)) {
1941 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001942 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001943 hash -= dev->real_num_tx_queues;
1944 return hash;
1945 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001946
1947 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001948 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001949 else
David S. Miller70192982009-01-27 16:34:47 -08001950 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001951
David S. Miller70192982009-01-27 16:34:47 -08001952 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001953
David S. Millerb6b2fed2008-07-21 09:48:06 -07001954 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001955}
Stephen Hemminger92477442009-03-21 13:39:26 -07001956EXPORT_SYMBOL(skb_tx_hash);
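/*
 * Illustrative sketch: a multiqueue driver that only wants the default
 * spreading behaviour can defer to skb_tx_hash() from its own
 * ndo_select_queue ("my_select_queue" is a hypothetical name):
 *
 *	static u16 my_select_queue(struct net_device *dev,
 *				   struct sk_buff *skb)
 *	{
 *		return skb_tx_hash(dev, skb);
 *	}
 */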
David S. Miller8f0f2222008-07-15 03:47:03 -07001957
Eric Dumazeted046422009-11-13 21:54:04 +00001958static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1959{
1960 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1961 if (net_ratelimit()) {
1962 WARN(1, "%s selects TX queue %d, but "
1963 "real number of TX queues is %d\n",
1964 dev->name, queue_index,
1965 dev->real_num_tx_queues);
1966 }
1967 return 0;
1968 }
1969 return queue_index;
1970}
1971
David S. Millere8a04642008-07-17 00:34:19 -07001972static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1973 struct sk_buff *skb)
1974{
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001975 u16 queue_index;
1976 struct sock *sk = skb->sk;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001977
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001978 if (sk_tx_queue_recorded(sk)) {
1979 queue_index = sk_tx_queue_get(sk);
1980 } else {
1981 const struct net_device_ops *ops = dev->netdev_ops;
1982
1983 if (ops->ndo_select_queue) {
1984 queue_index = ops->ndo_select_queue(dev, skb);
Eric Dumazeted046422009-11-13 21:54:04 +00001985 queue_index = dev_cap_txqueue(dev, queue_index);
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001986 } else {
1987 queue_index = 0;
1988 if (dev->real_num_tx_queues > 1)
1989 queue_index = skb_tx_hash(dev, skb);
1990
1991 if (sk && sk->sk_dst_cache)
1992 sk_tx_queue_set(sk, queue_index);
1993 }
1994 }
David S. Millereae792b2008-07-15 03:03:33 -07001995
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001996 skb_set_queue_mapping(skb, queue_index);
1997 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001998}
1999
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002000static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2001 struct net_device *dev,
2002 struct netdev_queue *txq)
2003{
2004 spinlock_t *root_lock = qdisc_lock(q);
2005 int rc;
2006
2007 spin_lock(root_lock);
2008 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2009 kfree_skb(skb);
2010 rc = NET_XMIT_DROP;
2011 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2012 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
2013 /*
2014 * This is a work-conserving queue; there are no old skbs
2015 * waiting to be sent out; and the qdisc is not running -
2016 * xmit the skb directly.
2017 */
2018 __qdisc_update_bstats(q, skb->len);
2019 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
2020 __qdisc_run(q);
2021 else
2022 clear_bit(__QDISC_STATE_RUNNING, &q->state);
2023
2024 rc = NET_XMIT_SUCCESS;
2025 } else {
2026 rc = qdisc_enqueue_root(skb, q);
2027 qdisc_run(q);
2028 }
2029 spin_unlock(root_lock);
2030
2031 return rc;
2032}
2033
Krishna Kumar4b258462010-01-21 01:26:29 -08002034/*
2035 * Returns true if either:
2036 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2037 * 2. skb is fragmented and the device does not support SG, or if
2038 * at least one of fragments is in highmem and device does not
2039 * support DMA from it.
2040 */
2041static inline int skb_needs_linearize(struct sk_buff *skb,
2042 struct net_device *dev)
2043{
2044 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2045 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2046 illegal_highdma(dev, skb)));
2047}
2048
Dave Jonesd29f7492008-07-22 14:09:06 -07002049/**
2050 * dev_queue_xmit - transmit a buffer
2051 * @skb: buffer to transmit
2052 *
2053 * Queue a buffer for transmission to a network device. The caller must
2054 * have set the device and priority and built the buffer before calling
2055 * this function. The function can be called from an interrupt.
2056 *
2057 * A negative errno code is returned on a failure. A success does not
2058 * guarantee the frame will be transmitted as it may be dropped due
2059 * to congestion or traffic shaping.
2060 *
2061 * -----------------------------------------------------------------------------------
2062 * I notice this method can also return errors from the queue disciplines,
2063 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2064 * be positive.
2065 *
2066 * Regardless of the return value, the skb is consumed, so it is currently
2067 * difficult to retry a send to this method. (You can bump the ref count
2068 * before sending to hold a reference for retry if you are careful.)
2069 *
2070 * When calling this method, interrupts MUST be enabled. This is because
2071 * the BH enable code must have IRQs enabled so that it will not deadlock.
2072 * --BLG
2073 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074int dev_queue_xmit(struct sk_buff *skb)
2075{
2076 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07002077 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 struct Qdisc *q;
2079 int rc = -ENOMEM;
2080
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002081 /* GSO will handle the following emulations directly. */
2082 if (netif_needs_gso(dev, skb))
2083 goto gso;
2084
Krishna Kumar4b258462010-01-21 01:26:29 -08002085 /* Convert a paged skb to linear, if required */
2086 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 goto out_kfree_skb;
2088
2089 /* If packet is not checksummed and device does not support
2090 * checksumming for this protocol, complete checksumming here.
2091 */
Herbert Xu663ead32007-04-09 11:59:07 -07002092 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2093 skb_set_transport_header(skb, skb->csum_start -
2094 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07002095 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
2096 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07002097 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
Herbert Xuf6a78bf2006-06-22 02:57:17 -07002099gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002100 /* Disable soft irqs for various locks below. Also
2101 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002103 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104
David S. Millereae792b2008-07-15 03:03:33 -07002105 txq = dev_pick_tx(dev, skb);
Paul E. McKenneya898def2010-02-22 17:04:49 -08002106 q = rcu_dereference_bh(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07002107
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002109 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110#endif
2111 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002112 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002113 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 }
2115
2116 /* The device has no queue. Common case for software devices:
2117 loopback and all sorts of tunnels...
2118
Herbert Xu932ff272006-06-09 12:20:56 -07002119 Really, it is unlikely that netif_tx_lock protection is necessary
2120 here. (f.e. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 counters.)
2122 However, it is possible that they rely on the protection
2123 we provide here.
2124
2125 Check this and take the lock. It is not prone to deadlocks.
2126 Locking the noqueue qdisc as well is even simpler 8)
2127 */
2128 if (dev->flags & IFF_UP) {
2129 int cpu = smp_processor_id(); /* ok because BHs are off */
2130
David S. Millerc773e842008-07-08 23:13:53 -07002131 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
David S. Millerc773e842008-07-08 23:13:53 -07002133 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002135 if (!netif_tx_queue_stopped(txq)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002136 rc = dev_hard_start_xmit(skb, dev, txq);
2137 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002138 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 goto out;
2140 }
2141 }
David S. Millerc773e842008-07-08 23:13:53 -07002142 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 if (net_ratelimit())
2144 printk(KERN_CRIT "Virtual device %s asks to "
2145 "queue packet!\n", dev->name);
2146 } else {
2147 /* Recursion detected! It is possible,
2148 * unfortunately */
2149 if (net_ratelimit())
2150 printk(KERN_CRIT "Dead loop on virtual device "
2151 "%s, fix it urgently!\n", dev->name);
2152 }
2153 }
2154
2155 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002156 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157
2158out_kfree_skb:
2159 kfree_skb(skb);
2160 return rc;
2161out:
Herbert Xud4828d82006-06-22 02:28:18 -07002162 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 return rc;
2164}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002165EXPORT_SYMBOL(dev_queue_xmit);
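/*
 * Illustrative sketch: a kernel caller transmitting a frame it built
 * itself sets skb->dev before handing the buffer off:
 *
 *	skb->dev = dev;
 *	skb_reset_mac_header(skb);
 *	rc = dev_queue_xmit(skb);
 *
 * Whatever rc says, the skb is consumed and must not be referenced
 * again by the caller.
 */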
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166
2167
2168/*=======================================================================
2169 Receiver routines
2170 =======================================================================*/
2171
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002172int netdev_max_backlog __read_mostly = 1000;
2173int netdev_budget __read_mostly = 300;
2174int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175
2176DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2177
2178
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179/**
2180 * netif_rx - post buffer to the network code
2181 * @skb: buffer to post
2182 *
2183 * This function receives a packet from a device driver and queues it for
2184 * the upper (protocol) levels to process. It always succeeds. The buffer
2185 * may be dropped during processing for congestion control or by the
2186 * protocol layers.
2187 *
2188 * return values:
2189 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 * NET_RX_DROP (packet was dropped)
2191 *
2192 */
2193
2194int netif_rx(struct sk_buff *skb)
2195{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 struct softnet_data *queue;
2197 unsigned long flags;
2198
2199 /* if netpoll wants it, pretend we never saw it */
2200 if (netpoll_rx(skb))
2201 return NET_RX_DROP;
2202
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002203 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002204 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
2206 /*
2207 * The code is arranged so that the path is shortest when
2208 * the CPU is congested but still operating.
2209 */
2210 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 queue = &__get_cpu_var(softnet_data);
2212
2213 __get_cpu_var(netdev_rx_stat).total++;
2214 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2215 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07002219 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220 }
2221
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002222 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 goto enqueue;
2224 }
2225
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 __get_cpu_var(netdev_rx_stat).dropped++;
2227 local_irq_restore(flags);
2228
2229 kfree_skb(skb);
2230 return NET_RX_DROP;
2231}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002232EXPORT_SYMBOL(netif_rx);
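/*
 * Illustrative sketch: a non-NAPI Ethernet driver's receive interrupt
 * sets the protocol and hands each frame to netif_rx() ("frame_len"
 * being the hardware-reported frame length):
 *
 *	skb_put(skb, frame_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */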
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233
2234int netif_rx_ni(struct sk_buff *skb)
2235{
2236 int err;
2237
2238 preempt_disable();
2239 err = netif_rx(skb);
2240 if (local_softirq_pending())
2241 do_softirq();
2242 preempt_enable();
2243
2244 return err;
2245}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246EXPORT_SYMBOL(netif_rx_ni);
2247
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248static void net_tx_action(struct softirq_action *h)
2249{
2250 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2251
2252 if (sd->completion_queue) {
2253 struct sk_buff *clist;
2254
2255 local_irq_disable();
2256 clist = sd->completion_queue;
2257 sd->completion_queue = NULL;
2258 local_irq_enable();
2259
2260 while (clist) {
2261 struct sk_buff *skb = clist;
2262 clist = clist->next;
2263
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002264 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 __kfree_skb(skb);
2266 }
2267 }
2268
2269 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002270 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
2272 local_irq_disable();
2273 head = sd->output_queue;
2274 sd->output_queue = NULL;
2275 local_irq_enable();
2276
2277 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002278 struct Qdisc *q = head;
2279 spinlock_t *root_lock;
2280
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 head = head->next_sched;
2282
David S. Miller5fb66222008-08-02 20:02:43 -07002283 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002284 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002285 smp_mb__before_clear_bit();
2286 clear_bit(__QDISC_STATE_SCHED,
2287 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002288 qdisc_run(q);
2289 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002291 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002292 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002293 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002294 } else {
2295 smp_mb__before_clear_bit();
2296 clear_bit(__QDISC_STATE_SCHED,
2297 &q->state);
2298 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 }
2300 }
2301 }
2302}
2303
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002304static inline int deliver_skb(struct sk_buff *skb,
2305 struct packet_type *pt_prev,
2306 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307{
2308 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002309 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310}
2311
2312#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002313
2314#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2315/* This hook is defined here for ATM LANE */
2316int (*br_fdb_test_addr_hook)(struct net_device *dev,
2317 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002318EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002319#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320
Stephen Hemminger6229e362007-03-21 13:38:47 -07002321/*
2322 * If bridge module is loaded call bridging hook.
2323 * returns NULL if packet was consumed.
2324 */
2325struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2326 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002327EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002328
Stephen Hemminger6229e362007-03-21 13:38:47 -07002329static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2330 struct packet_type **pt_prev, int *ret,
2331 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332{
2333 struct net_bridge_port *port;
2334
Stephen Hemminger6229e362007-03-21 13:38:47 -07002335 if (skb->pkt_type == PACKET_LOOPBACK ||
2336 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2337 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338
2339 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002340 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002342 }
2343
Stephen Hemminger6229e362007-03-21 13:38:47 -07002344 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345}
2346#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002347#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348#endif
2349
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002350#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2351struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2352EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2353
2354static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2355 struct packet_type **pt_prev,
2356 int *ret,
2357 struct net_device *orig_dev)
2358{
2359 if (skb->dev->macvlan_port == NULL)
2360 return skb;
2361
2362 if (*pt_prev) {
2363 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2364 *pt_prev = NULL;
2365 }
2366 return macvlan_handle_frame_hook(skb);
2367}
2368#else
2369#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2370#endif
2371
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372#ifdef CONFIG_NET_CLS_ACT
2373/* TODO: Maybe we should just force sch_ingress to be compiled in
2374 * when CONFIG_NET_CLS_ACT is? Otherwise we currently pay for some
2375 * useless instructions (a compare and two extra stores) when
2376 * CONFIG_NET_CLS_ACT is on but the ingress qdisc is unused.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002377 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 * the ingress scheduler, you just can't add policies on ingress.
2379 *
2380 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002381static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002384 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002385 struct netdev_queue *rxq;
2386 int result = TC_ACT_OK;
2387 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002388
Herbert Xuf697c3e2007-10-14 00:38:47 -07002389 if (MAX_RED_LOOP < ttl++) {
2390 printk(KERN_WARNING
2391 "Redir loop detected Dropping packet (%d->%d)\n",
Eric Dumazet8964be42009-11-20 15:35:04 -08002392 skb->skb_iif, dev->ifindex);
Herbert Xuf697c3e2007-10-14 00:38:47 -07002393 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 }
2395
Herbert Xuf697c3e2007-10-14 00:38:47 -07002396 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2397 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2398
David S. Miller555353c2008-07-08 17:33:13 -07002399 rxq = &dev->rx_queue;
2400
David S. Miller83874002008-07-17 00:53:03 -07002401 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002402 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002403 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002404 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2405 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002406 spin_unlock(qdisc_lock(q));
2407 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002408
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 return result;
2410}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002411
2412static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2413 struct packet_type **pt_prev,
2414 int *ret, struct net_device *orig_dev)
2415{
David S. Miller8d50b532008-07-30 02:37:46 -07002416 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002417 goto out;
2418
2419 if (*pt_prev) {
2420 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2421 *pt_prev = NULL;
2422 } else {
2423 /* Huh? Why does turning on AF_PACKET affect this? */
2424 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2425 }
2426
2427 switch (ing_filter(skb)) {
2428 case TC_ACT_SHOT:
2429 case TC_ACT_STOLEN:
2430 kfree_skb(skb);
2431 return NULL;
2432 }
2433
2434out:
2435 skb->tc_verd = 0;
2436 return skb;
2437}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438#endif
2439
Patrick McHardybc1d0412008-07-14 22:49:30 -07002440/*
2441 * netif_nit_deliver - deliver received packets to network taps
2442 * @skb: buffer
2443 *
2444 * This function is used to deliver incoming packets to network
2445 * taps. It should be used when the normal netif_receive_skb path
2446 * is bypassed, for example because of VLAN acceleration.
2447 */
2448void netif_nit_deliver(struct sk_buff *skb)
2449{
2450 struct packet_type *ptype;
2451
2452 if (list_empty(&ptype_all))
2453 return;
2454
2455 skb_reset_network_header(skb);
2456 skb_reset_transport_header(skb);
2457 skb->mac_len = skb->network_header - skb->mac_header;
2458
2459 rcu_read_lock();
2460 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2461 if (!ptype->dev || ptype->dev == skb->dev)
2462 deliver_skb(skb, ptype, skb->dev);
2463 }
2464 rcu_read_unlock();
2465}
2466
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002467/**
2468 * netif_receive_skb - process receive buffer from network
2469 * @skb: buffer to process
2470 *
2471 * netif_receive_skb() is the main receive data processing function.
2472 * It always succeeds. The buffer may be dropped during processing
2473 * for congestion control or by the protocol layers.
2474 *
2475 * This function may only be called from softirq context and interrupts
2476 * should be enabled.
2477 *
2478 * Return values (usually ignored):
2479 * NET_RX_SUCCESS: no congestion
2480 * NET_RX_DROP: packet was dropped
2481 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482int netif_receive_skb(struct sk_buff *skb)
2483{
2484 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002485 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002486 struct net_device *null_or_orig;
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002487 struct net_device *null_or_bond;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002489 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002491 if (!skb->tstamp.tv64)
2492 net_timestamp(skb);
2493
Eric Dumazet05423b22009-10-26 18:40:35 -07002494 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002495 return NET_RX_SUCCESS;
2496
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002498 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499 return NET_RX_DROP;
2500
Eric Dumazet8964be42009-11-20 15:35:04 -08002501 if (!skb->skb_iif)
2502 skb->skb_iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002503
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002504 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002505 orig_dev = skb->dev;
2506 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002507 if (skb_bond_should_drop(skb))
2508 null_or_orig = orig_dev; /* deliver only exact match */
2509 else
2510 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002511 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002512
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 __get_cpu_var(netdev_rx_stat).total++;
2514
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002515 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002516 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002517 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518
2519 pt_prev = NULL;
2520
2521 rcu_read_lock();
2522
2523#ifdef CONFIG_NET_CLS_ACT
2524 if (skb->tc_verd & TC_NCLS) {
2525 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2526 goto ncls;
2527 }
2528#endif
2529
2530 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002531 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2532 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002533 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002534 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 pt_prev = ptype;
2536 }
2537 }
2538
2539#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002540 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2541 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543ncls:
2544#endif
2545
Stephen Hemminger6229e362007-03-21 13:38:47 -07002546 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2547 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002549 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2550 if (!skb)
2551 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002553 /*
2554 * Make sure frames received on VLAN interfaces stacked on
2555 * bonding interfaces still make their way to any base bonding
2556 * device that may have registered for a specific ptype. The
2557 * handler may have to adjust skb->dev and orig_dev.
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002558 */
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002559 null_or_bond = NULL;
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002560 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2561 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002562 null_or_bond = vlan_dev_real_dev(skb->dev);
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002563 }
2564
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002566 list_for_each_entry_rcu(ptype,
2567 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Andy Gospodarek1f3c8802009-12-14 10:48:58 +00002568 if (ptype->type == type && (ptype->dev == null_or_orig ||
Andy Gospodarekca8d9ea2010-01-06 12:56:37 +00002569 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2570 ptype->dev == null_or_bond)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002571 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002572 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 pt_prev = ptype;
2574 }
2575 }
2576
2577 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002578 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 } else {
2580 kfree_skb(skb);
2581 /* Jamal, now you will not be able to escape explaining
2582 * to me how you were going to use this. :-)
2583 */
2584 ret = NET_RX_DROP;
2585 }
2586
2587out:
2588 rcu_read_unlock();
2589 return ret;
2590}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002591EXPORT_SYMBOL(netif_receive_skb);
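/*
 * Illustrative sketch: a NAPI driver's poll routine feeds frames to
 * netif_receive_skb() directly from softirq context. "my_rx_frame" is
 * a hypothetical ring-buffer accessor:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct net_device *dev = napi->dev;
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = my_rx_frame(dev)) != NULL) {
 *			skb->protocol = eth_type_trans(skb, dev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */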
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002593/* Network device is going away, flush any packets still pending */
2594static void flush_backlog(void *arg)
2595{
2596 struct net_device *dev = arg;
2597 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2598 struct sk_buff *skb, *tmp;
2599
2600 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2601 if (skb->dev == dev) {
2602 __skb_unlink(skb, &queue->input_pkt_queue);
2603 kfree_skb(skb);
2604 }
2605}
2606
Herbert Xud565b0a2008-12-15 23:38:52 -08002607static int napi_gro_complete(struct sk_buff *skb)
2608{
2609 struct packet_type *ptype;
2610 __be16 type = skb->protocol;
2611 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2612 int err = -ENOENT;
2613
Herbert Xufc59f9a2009-04-14 15:11:06 -07002614 if (NAPI_GRO_CB(skb)->count == 1) {
2615 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002616 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002617 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002618
2619 rcu_read_lock();
2620 list_for_each_entry_rcu(ptype, head, list) {
2621 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2622 continue;
2623
2624 err = ptype->gro_complete(skb);
2625 break;
2626 }
2627 rcu_read_unlock();
2628
2629 if (err) {
2630 WARN_ON(&ptype->list == head);
2631 kfree_skb(skb);
2632 return NET_RX_SUCCESS;
2633 }
2634
2635out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002636 return netif_receive_skb(skb);
2637}
2638
David S. Miller11380a42010-01-19 13:46:10 -08002639static void napi_gro_flush(struct napi_struct *napi)
Herbert Xud565b0a2008-12-15 23:38:52 -08002640{
2641 struct sk_buff *skb, *next;
2642
2643 for (skb = napi->gro_list; skb; skb = next) {
2644 next = skb->next;
2645 skb->next = NULL;
2646 napi_gro_complete(skb);
2647 }
2648
Herbert Xu4ae55442009-02-08 18:00:36 +00002649 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002650 napi->gro_list = NULL;
2651}
Herbert Xud565b0a2008-12-15 23:38:52 -08002652
enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int same_flow;
	int mac_len;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frags(skb))
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_shinfo(skb)->frags[0].size -= grow;

		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
			put_page(skb_shinfo(skb)->frags[0].page);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				/* memmove takes a byte count, not a frag count */
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);

static gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;

	if (netpoll_rx_on(skb))
		return GRO_NORMAL;

	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow =
			(p->dev == skb->dev) &&
			!compare_ether_header(skb_mac_header(p),
					      skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);
}

gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_skb_finish);

void skb_gro_reset_offset(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb->mac_header == skb->tail &&
	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
		NAPI_GRO_CB(skb)->frag0 =
			page_address(skb_shinfo(skb)->frags[0].page) +
			skb_shinfo(skb)->frags[0].page_offset;
		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
	}
}
EXPORT_SYMBOL(skb_gro_reset_offset);

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);

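/*
 * A minimal sketch of how a NAPI driver feeds received packets into GRO
 * from its ->poll() callback.  Everything named example_* is hypothetical;
 * only the napi_* calls come from this file.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = example_rx_next_skb(priv);	/* hypothetical */

		if (!skb)
			break;
		napi_gro_receive(napi, skb);	/* instead of netif_receive_skb() */
		work++;
	}

	if (work < budget)
		napi_complete(napi);		/* also flushes held GRO flows */
	return work;
}
#endif
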
void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));

	napi->skb = skb;
}
EXPORT_SYMBOL(napi_reuse_skb);

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
			       gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (ret == GRO_HELD)
			skb_gro_pull(skb, -ETH_HLEN);
		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_frags_finish);

struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}
EXPORT_SYMBOL(napi_frags_skb);

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);

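/*
 * Sketch of the page-based ("frags") GRO entry points, for a driver that
 * receives directly into pages instead of skb heads.  All example_* names
 * are hypothetical.
 */
#if 0
static void example_rx_page(struct napi_struct *napi, struct page *page,
			    unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;		/* allocation failure: drop this packet */

	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	napi_gro_frags(napi);	/* napi_frags_skb() parses the Ethernet header */
}
#endif
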
static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	napi->weight = weight_p;
	do {
		struct sk_buff *skb;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb) {
			__napi_complete(napi);
			local_irq_enable();
			break;
		}
		local_irq_enable();

		netif_receive_skb(skb);
	} while (++work < quota && jiffies == start_time);

	return work;
}

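/*
 * process_backlog() is the ->poll() handler of the per-cpu backlog NAPI
 * instance used by netif_rx(): non-NAPI drivers queue skbs onto
 * softnet_data.input_pkt_queue from interrupt context, and the loop above
 * drains that queue in softirq context, stopping after @quota packets or
 * as soon as the jiffies counter moves on.
 */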
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

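/*
 * Typical interrupt-handler usage (hypothetical device; sketch only):
 * mask the RX interrupt, then let the softirq do the real work.
 */
#if 0
static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {	/* atomically grabs NAPI_STATE_SCHED */
		example_disable_rx_irq(priv);	/* hypothetical */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif
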
void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * Don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu.
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);

void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

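/*
 * Sketch of driver setup/teardown around the two helpers above (all
 * example_* and priv names are hypothetical):
 */
#if 0
	netif_napi_add(netdev, &priv->napi, example_poll, 64);	/* probe; 64 is the usual weight */
	...
	netif_napi_del(&priv->napi);	/* remove; also frees any held GRO skbs */
#endif
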
static void net_rx_action(struct softirq_action *h)
{
	struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else
				list_move_tail(&n->poll_list, list);
		}

		netpoll_poll_unlock(have);
	}
out:
	local_irq_enable();

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware.
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	__get_cpu_var(netdev_rx_stat).time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

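/*
 * Two limits bound a single net_rx_action() run: netdev_budget packets
 * (300 by default, tunable through /proc/sys/net/core/netdev_budget) and
 * two jiffies of wall time, i.e. at most ~2ms with HZ=1000.  Whichever
 * trips first takes the softnet_break path, which bumps the time_squeeze
 * counter shown in /proc/net/softnet_stat and reschedules the softirq.
 */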
static gifconf_func_t *gifconf_list[NPROTO];

/**
 * register_gifconf - register a SIOCGIF handler
 * @family: Address family
 * @gifconf: Function handler
 *
 * Register protocol dependent address dumping routines. The handler
 * that is passed must not be freed or reused until it has been replaced
 * by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
EXPORT_SYMBOL(register_gifconf);


/*
 * Map an interface index to its name (SIOCGIFNAME)
 */

/*
 * We need this ioctl for efficient implementation of the
 * if_indextoname() function required by the IPv6 API. Without
 * it, we would have to search all the interfaces to find a
 * match. --pb
 */

static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 * Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	rcu_read_unlock();

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

/*
 * Perform a SIOCGIFCONF call. This structure will change
 * size eventually, and there is nothing I can do about it.
 * Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(struct net *net, char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 * Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 * Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for_each_netdev(net, dev) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 * All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 * Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}

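/*
 * Userspace view of dev_ifconf() (illustration only, not kernel code):
 * the classic SIOCGIFCONF enumeration loop.
 */
#if 0
	struct ifreq reqs[32];
	struct ifconf ifc;
	int i, fd = socket(AF_INET, SOCK_DGRAM, 0);

	ifc.ifc_len = sizeof(reqs);
	ifc.ifc_req = reqs;
	if (fd >= 0 && ioctl(fd, SIOCGIFCONF, &ifc) == 0)
		for (i = 0; i < (int)(ifc.ifc_len / sizeof(struct ifreq)); i++)
			printf("%s\n", reqs[i].ifr_name);
#endif
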
#ifdef CONFIG_PROC_FS
/*
 * This is invoked by the /proc filesystem handler to display a device
 * in detail.
 */
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);
	loff_t off;
	struct net_device *dev;

	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	off = 1;
	for_each_netdev_rcu(net, dev)
		if (off++ == *pos)
			return dev;

	return NULL;
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev = (v == SEQ_START_TOKEN) ?
				  first_net_device(seq_file_net(seq)) :
				  next_net_device((struct net_device *)v);

	++*pos;
	return rcu_dereference(dev);
}

void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	const struct net_device_stats *stats = dev_get_stats(dev);

	seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 * Called from the PROCfs module. This now uses the new arbitrary sized
 * /proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      " |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

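/*
 * Resulting /proc/net/dev layout: the two header lines above followed by
 * one dev_seq_printf_stats() line per interface, e.g. (figures invented):
 *
 *     lo:   10936     132    0    0    0     0          0         0 ...
 */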
static struct netif_rx_stats *softnet_get_online(loff_t *pos)
{
	struct netif_rx_stats *rc = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			rc = &per_cpu(netdev_rx_stat, *pos);
			break;
		} else
			++*pos;
	return rc;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct netif_rx_stats *s = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   s->total, s->dropped, s->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   s->cpu_collision);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &dev_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dev_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static const struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = softnet_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %pF\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ptype_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ptype_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};


static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	proc_net_remove(net, "ptype");
out_softnet:
	proc_net_remove(net, "softnet_stat");
out_dev:
	proc_net_remove(net, "dev");
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	proc_net_remove(net, "ptype");
	proc_net_remove(net, "softnet_stat");
	proc_net_remove(net, "dev");
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int __init dev_proc_init(void)
{
	return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */


/**
 * netdev_set_master - set up master/slave pair
 * @slave: slave device
 * @master: new master device
 *
 * Changes the master device of the slave. Pass %NULL to break the
 * bonding. The caller must hold the RTNL semaphore. On a failure
 * a negative errno code is returned. On success the reference counts
 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 * function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	synchronize_net();

	if (old)
		dev_put(old);

	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
EXPORT_SYMBOL(netdev_set_master);

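/*
 * Sketch of the intended calling pattern (bonding-style; error handling
 * elided, rtnl_lock() held as documented above):
 */
#if 0
	err = netdev_set_master(slave_dev, bond_dev);	/* enslave */
	...
	netdev_set_master(slave_dev, NULL);		/* break the bond */
#endif
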
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	uid_t uid;
	gid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			printk(KERN_WARNING "%s: promiscuity touches roof, "
				"set promiscuity failed, promiscuity feature "
				"of device might be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
								"left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_ANOM_PROMISCUOUS,
				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				dev->name, (dev->flags & IFF_PROMISC),
				(old_flags & IFF_PROMISC),
				audit_get_loginuid(current),
				uid, gid,
				audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);

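/*
 * Typical use, e.g. by a packet-capture socket (sketch only; the caller
 * must hold rtnl, as __dev_set_promiscuity() asserts):
 */
#if 0
	dev_set_promiscuity(dev, 1);	/* start sniffing: count goes up */
	...
	dev_set_promiscuity(dev, -1);	/* done: device may leave promisc mode */
#endif
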
/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * for all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			printk(KERN_WARNING "%s: allmulti touches roof, "
				"set allmulti failed, allmulti feature of "
				"device might be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
	}
	return 0;
}
EXPORT_SYMBOL(dev_set_allmulti);

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags & IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
	else {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = 1;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = 0;
		}

		if (ops->ndo_set_multicast_list)
			ops->ndo_set_multicast_list(dev);
	}
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/* hw addresses list handling functions */

static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
			 int addr_len, unsigned char addr_type)
{
	struct netdev_hw_addr *ha;
	int alloc_size;

	if (addr_len > MAX_ADDR_LEN)
		return -EINVAL;

	list_for_each_entry(ha, &list->list, list) {
		if (!memcmp(ha->addr, addr, addr_len) &&
		    ha->type == addr_type) {
			ha->refcount++;
			return 0;
		}
	}

	alloc_size = sizeof(*ha);
	if (alloc_size < L1_CACHE_BYTES)
		alloc_size = L1_CACHE_BYTES;
	ha = kmalloc(alloc_size, GFP_ATOMIC);
	if (!ha)
		return -ENOMEM;
	memcpy(ha->addr, addr, addr_len);
	ha->type = addr_type;
	ha->refcount = 1;
	ha->synced = false;
	list_add_tail_rcu(&ha->list, &list->list);
	list->count++;
	return 0;
}

static void ha_rcu_free(struct rcu_head *head)
{
	struct netdev_hw_addr *ha;

	ha = container_of(head, struct netdev_hw_addr, rcu_head);
	kfree(ha);
}

static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
			 int addr_len, unsigned char addr_type)
{
	struct netdev_hw_addr *ha;

	list_for_each_entry(ha, &list->list, list) {
		if (!memcmp(ha->addr, addr, addr_len) &&
		    (ha->type == addr_type || !addr_type)) {
			if (--ha->refcount)
				return 0;
			list_del_rcu(&ha->list);
			call_rcu(&ha->rcu_head, ha_rcu_free);
			list->count--;
			return 0;
		}
	}
	return -ENOENT;
}

static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
				  struct netdev_hw_addr_list *from_list,
				  int addr_len,
				  unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha, *ha2;
	unsigned char type;

	list_for_each_entry(ha, &from_list->list, list) {
		type = addr_type ? addr_type : ha->type;
		err = __hw_addr_add(to_list, ha->addr, addr_len, type);
		if (err)
			goto unroll;
	}
	return 0;

unroll:
	list_for_each_entry(ha2, &from_list->list, list) {
		if (ha2 == ha)
			break;
		type = addr_type ? addr_type : ha2->type;
		__hw_addr_del(to_list, ha2->addr, addr_len, type);
	}
	return err;
}

static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len,
				   unsigned char addr_type)
{
	struct netdev_hw_addr *ha;
	unsigned char type;

	list_for_each_entry(ha, &from_list->list, list) {
		type = addr_type ? addr_type : ha->type;
		/* pass the resolved type, so a zero addr_type falls back
		 * to the type stored in each entry */
		__hw_addr_del(to_list, ha->addr, addr_len, type);
	}
}

static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
			  struct netdev_hw_addr_list *from_list,
			  int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (!ha->synced) {
			err = __hw_addr_add(to_list, ha->addr,
					    addr_len, ha->type);
			if (err)
				break;
			ha->synced = true;
			ha->refcount++;
		} else if (ha->refcount == 1) {
			__hw_addr_del(to_list, ha->addr, addr_len, ha->type);
			__hw_addr_del(from_list, ha->addr, addr_len, ha->type);
		}
	}
	return err;
}

static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
			     struct netdev_hw_addr_list *from_list,
			     int addr_len)
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->synced) {
			__hw_addr_del(to_list, ha->addr,
				      addr_len, ha->type);
			ha->synced = false;
			__hw_addr_del(from_list, ha->addr,
				      addr_len, ha->type);
		}
	}
}

static void __hw_addr_flush(struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		list_del_rcu(&ha->list);
		call_rcu(&ha->rcu_head, ha_rcu_free);
	}
	list->count = 0;
}

static void __hw_addr_init(struct netdev_hw_addr_list *list)
{
	INIT_LIST_HEAD(&list->list);
	list->count = 0;
}

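/*
 * Semantics of the helpers above: entries are keyed by (address, type)
 * and reference counted, so adding an address twice merely bumps
 * ha->refcount, and an entry disappears only when its last user deletes
 * it.  __hw_addr_sync()/__hw_addr_unsync() keep one device's list (e.g.
 * a VLAN's) mirrored into a lower device's list, using the ->synced flag
 * plus one extra reference to track what has been propagated.
 */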
/* Device addresses handling functions */

static void dev_addr_flush(struct net_device *dev)
{
	/* rtnl_mutex must be held here */

	__hw_addr_flush(&dev->dev_addrs);
	dev->dev_addr = NULL;
}

static int dev_addr_init(struct net_device *dev)
{
	unsigned char addr[MAX_ADDR_LEN];
	struct netdev_hw_addr *ha;
	int err;

	/* rtnl_mutex must be held here */

	__hw_addr_init(&dev->dev_addrs);
	memset(addr, 0, sizeof(addr));
	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
			    NETDEV_HW_ADDR_T_LAN);
	if (!err) {
		/*
		 * Get the first (previously created) address from the list
		 * and set dev_addr pointer to this location.
		 */
		ha = list_first_entry(&dev->dev_addrs.list,
				      struct netdev_hw_addr, list);
		dev->dev_addr = ha->addr;
	}
	return err;
}

/**
 * dev_addr_add - Add a device address
 * @dev: device
 * @addr: address to add
 * @addr_type: address type
 *
 * Add a device address to the device or increase the reference count if
 * it already exists.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_add(struct net_device *dev, unsigned char *addr,
		 unsigned char addr_type)
{
	int err;

	ASSERT_RTNL();

	err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_add);

/**
 * dev_addr_del - Release a device address.
 * @dev: device
 * @addr: address to delete
 * @addr_type: address type
 *
 * Release reference to a device address and remove it from the device
 * if the reference count drops to zero.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_del(struct net_device *dev, unsigned char *addr,
		 unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha;

	ASSERT_RTNL();

	/*
	 * We cannot remove the first address from the list because
	 * dev->dev_addr points to that.
	 */
	ha = list_first_entry(&dev->dev_addrs.list,
			      struct netdev_hw_addr, list);
	if (ha->addr == dev->dev_addr && ha->refcount == 1)
		return -ENOENT;

	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
			    addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_del);

/**
 * dev_addr_add_multiple - Add device addresses from another device
 * @to_dev: device to which addresses will be added
 * @from_dev: device from which addresses will be added
 * @addr_type: address type - 0 means the type will be taken from from_dev
 *
 * Add the device addresses of one device to another.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_add_multiple(struct net_device *to_dev,
			  struct net_device *from_dev,
			  unsigned char addr_type)
{
	int err;

	ASSERT_RTNL();

	if (from_dev->addr_len != to_dev->addr_len)
		return -EINVAL;
	err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
				     to_dev->addr_len, addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_add_multiple);

/**
 * dev_addr_del_multiple - Delete device addresses by another device
 * @to_dev: device where the addresses will be deleted
 * @from_dev: device supplying the addresses to be deleted
 * @addr_type: address type - 0 means the type will be taken from from_dev
 *
 * Deletes the addresses listed in from_dev from to_dev.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_del_multiple(struct net_device *to_dev,
			  struct net_device *from_dev,
			  unsigned char addr_type)
{
	ASSERT_RTNL();

	if (from_dev->addr_len != to_dev->addr_len)
		return -EINVAL;
	__hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
			       to_dev->addr_len, addr_type);
	call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
	return 0;
}
EXPORT_SYMBOL(dev_addr_del_multiple);

/* multicast addresses handling functions */

int __dev_addr_delete(struct dev_addr_list **list, int *count,
		      void *addr, int alen, int glbl)
{
	struct dev_addr_list *da;

	for (; (da = *list) != NULL; list = &da->next) {
		if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
		    alen == da->da_addrlen) {
			if (glbl) {
				int old_glbl = da->da_gusers;
				da->da_gusers = 0;
				if (old_glbl == 0)
					break;
			}
			if (--da->da_users)
				return 0;

			*list = da->next;
			kfree(da);
			(*count)--;
			return 0;
		}
	}
	return -ENOENT;
}

int __dev_addr_add(struct dev_addr_list **list, int *count,
		   void *addr, int alen, int glbl)
{
	struct dev_addr_list *da;

	for (da = *list; da != NULL; da = da->next) {
		if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
		    da->da_addrlen == alen) {
			if (glbl) {
				int old_glbl = da->da_gusers;
				da->da_gusers = 1;
				if (old_glbl)
					return 0;
			}
			da->da_users++;
			return 0;
		}
	}

	da = kzalloc(sizeof(*da), GFP_ATOMIC);
	if (da == NULL)
		return -ENOMEM;
	memcpy(da->da_addr, addr, alen);
	da->da_addrlen = alen;
	da->da_users = 1;
	da->da_gusers = glbl ? 1 : 0;
	da->next = *list;
	*list = da;
	(*count)++;
	return 0;
}

/**
 * dev_unicast_delete - Release secondary unicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a secondary unicast address and remove it
 * from the device if the reference count drops to zero.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_unicast_delete(struct net_device *dev, void *addr)
{
	int err;

	ASSERT_RTNL();

	netif_addr_lock_bh(dev);
	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_unicast_delete);

/**
 * dev_unicast_add - add a secondary unicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a secondary unicast address to the device or increase
 * the reference count if it already exists.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_unicast_add(struct net_device *dev, void *addr)
{
	int err;

	ASSERT_RTNL();

	netif_addr_lock_bh(dev);
	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4149 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004150 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004151 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004152 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004153 return err;
4154}
4155EXPORT_SYMBOL(dev_unicast_add);
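
/*
 * Illustrative sketch (not part of the original file): taking and
 * releasing a reference on a secondary unicast address, e.g. for a
 * protocol that wants frames sent to an extra MAC accepted by the
 * hardware filter.  Helper names are hypothetical.
 */
static int example_listen_on_extra_mac(struct net_device *dev,
				       unsigned char *mac)
{
	int err;

	rtnl_lock();		/* both helpers assert the RTNL */
	err = dev_unicast_add(dev, mac);
	rtnl_unlock();
	return err;
}

static void example_stop_listening(struct net_device *dev,
				   unsigned char *mac)
{
	rtnl_lock();
	dev_unicast_delete(dev, mac);
	rtnl_unlock();
}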
4156
Chris Leeche83a2ea2008-01-31 16:53:23 -08004157int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4158 struct dev_addr_list **from, int *from_count)
4159{
4160 struct dev_addr_list *da, *next;
4161 int err = 0;
4162
4163 da = *from;
4164 while (da != NULL) {
4165 next = da->next;
4166 if (!da->da_synced) {
4167 err = __dev_addr_add(to, to_count,
4168 da->da_addr, da->da_addrlen, 0);
4169 if (err < 0)
4170 break;
4171 da->da_synced = 1;
4172 da->da_users++;
4173 } else if (da->da_users == 1) {
4174 __dev_addr_delete(to, to_count,
4175 da->da_addr, da->da_addrlen, 0);
4176 __dev_addr_delete(from, from_count,
4177 da->da_addr, da->da_addrlen, 0);
4178 }
4179 da = next;
4180 }
4181 return err;
4182}
Johannes Bergc4029082009-06-17 17:43:30 +02004183EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004184
4185void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4186 struct dev_addr_list **from, int *from_count)
4187{
4188 struct dev_addr_list *da, *next;
4189
4190 da = *from;
4191 while (da != NULL) {
4192 next = da->next;
4193 if (da->da_synced) {
4194 __dev_addr_delete(to, to_count,
4195 da->da_addr, da->da_addrlen, 0);
4196 da->da_synced = 0;
4197 __dev_addr_delete(from, from_count,
4198 da->da_addr, da->da_addrlen, 0);
4199 }
4200 da = next;
4201 }
4202}
Johannes Bergc4029082009-06-17 17:43:30 +02004203EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004204
4205/**
4206 * dev_unicast_sync - Synchronize device's unicast list to another device
4207 * @to: destination device
4208 * @from: source device
4209 *
4210 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004211 * addresses that have no users left. The source device must be
4212 * locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08004213 *
4214 * This function is intended to be called from the dev->set_rx_mode
4215 * function of layered software devices.
4216 */
4217int dev_unicast_sync(struct net_device *to, struct net_device *from)
4218{
4219 int err = 0;
4220
Jiri Pirkoccffad252009-05-22 23:22:17 +00004221 if (to->addr_len != from->addr_len)
4222 return -EINVAL;
4223
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004224 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004225 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004226 if (!err)
4227 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004228 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004229 return err;
4230}
4231EXPORT_SYMBOL(dev_unicast_sync);
4232
4233/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004234 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004235 * @to: destination device
4236 * @from: source device
4237 *
4238 * Remove all addresses that were added to the destination device by
4239 * dev_unicast_sync(). This function is intended to be called from the
4240 * dev->stop function of layered software devices.
4241 */
4242void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4243{
Jiri Pirkoccffad252009-05-22 23:22:17 +00004244 if (to->addr_len != from->addr_len)
4245 return;
4246
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004247 netif_addr_lock_bh(from);
4248 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004249 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004250 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004251 netif_addr_unlock(to);
4252 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004253}
4254EXPORT_SYMBOL(dev_unicast_unsync);
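
/*
 * Illustrative sketch (not part of the original file): how a layered
 * (VLAN-style) driver might use the sync/unsync pair from its
 * ndo_set_rx_mode and ndo_stop callbacks.  "example_priv" and its
 * lowerdev field are hypothetical driver state.
 */
struct example_priv {
	struct net_device *lowerdev;
};

static void example_set_rx_mode(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* runs with dev's address lock held, per the comment above */
	dev_unicast_sync(priv->lowerdev, dev);
}

static int example_stop(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	dev_unicast_unsync(priv->lowerdev, dev);
	return 0;
}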
4255
Jiri Pirkoccffad252009-05-22 23:22:17 +00004256static void dev_unicast_flush(struct net_device *dev)
4257{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004258 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004259 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004260 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004261}
4262
4263static void dev_unicast_init(struct net_device *dev)
4264{
Jiri Pirko31278e72009-06-17 01:12:19 +00004265 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004266}
4267
4268
Denis Cheng12972622007-07-18 02:12:56 -07004269static void __dev_addr_discard(struct dev_addr_list **list)
4270{
4271 struct dev_addr_list *tmp;
4272
4273 while (*list != NULL) {
4274 tmp = *list;
4275 *list = tmp->next;
4276 if (tmp->da_users > tmp->da_gusers)
4277 printk(KERN_ERR "__dev_addr_discard: address leakage! "
4278 "da_users=%d\n", tmp->da_users);
4279 kfree(tmp);
4280 }
4281}
4282
Denis Cheng26cc2522007-07-18 02:12:03 -07004283static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004284{
David S. Millerb9e40852008-07-15 00:15:08 -07004285 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004286
Denis Cheng456ad752007-07-18 02:10:54 -07004287 __dev_addr_discard(&dev->mc_list);
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004288 netdev_mc_count(dev) = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004289
David S. Millerb9e40852008-07-15 00:15:08 -07004290 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004291}
4292
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004293/**
4294 * dev_get_flags - get flags reported to userspace
4295 * @dev: device
4296 *
4297 * Get the combination of flag bits exported through APIs to userspace.
4298 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004299unsigned dev_get_flags(const struct net_device *dev)
4300{
4301 unsigned flags;
4302
4303 flags = (dev->flags & ~(IFF_PROMISC |
4304 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004305 IFF_RUNNING |
4306 IFF_LOWER_UP |
4307 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308 (dev->gflags & (IFF_PROMISC |
4309 IFF_ALLMULTI));
4310
Stefan Rompfb00055a2006-03-20 17:09:11 -08004311 if (netif_running(dev)) {
4312 if (netif_oper_up(dev))
4313 flags |= IFF_RUNNING;
4314 if (netif_carrier_ok(dev))
4315 flags |= IFF_LOWER_UP;
4316 if (netif_dormant(dev))
4317 flags |= IFF_DORMANT;
4318 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319
4320 return flags;
4321}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004322EXPORT_SYMBOL(dev_get_flags);
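
/*
 * Illustrative sketch (not part of the original file): testing the
 * combined flag word, e.g. whether an interface is administratively
 * up and operationally running.
 */
static bool example_link_usable(const struct net_device *dev)
{
	unsigned flags = dev_get_flags(dev);

	return (flags & IFF_UP) && (flags & IFF_RUNNING);
}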
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323
Patrick McHardybd380812010-02-26 06:34:53 +00004324int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326 int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004327 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328
Patrick McHardy24023452007-07-14 18:51:31 -07004329 ASSERT_RTNL();
4330
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331 /*
4332 * Set the flags on our device.
4333 */
4334
4335 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4336 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4337 IFF_AUTOMEDIA)) |
4338 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4339 IFF_ALLMULTI));
4340
4341 /*
4342 * Load in the correct multicast list now the flags have changed.
4343 */
4344
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004345 if ((old_flags ^ flags) & IFF_MULTICAST)
4346 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004347
Patrick McHardy4417da62007-06-27 01:28:10 -07004348 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004349
4350 /*
4351 * Have we downed the interface? We handle IFF_UP ourselves
4352 * according to user attempts to set it, rather than blindly
4353 * setting it.
4354 */
4355
4356 ret = 0;
4357 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00004358 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359
4360 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004361 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362 }
4363
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004365 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4366
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367 dev->gflags ^= IFF_PROMISC;
4368 dev_set_promiscuity(dev, inc);
4369 }
4370
4371 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4372 is important. Some (broken) drivers set IFF_PROMISC when
4373 IFF_ALLMULTI is requested, without asking us and without reporting.
4374 */
4375 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004376 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4377
Linus Torvalds1da177e2005-04-16 15:20:36 -07004378 dev->gflags ^= IFF_ALLMULTI;
4379 dev_set_allmulti(dev, inc);
4380 }
4381
Patrick McHardybd380812010-02-26 06:34:53 +00004382 return ret;
4383}
4384
4385void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4386{
4387 unsigned int changes = dev->flags ^ old_flags;
4388
4389 if (changes & IFF_UP) {
4390 if (dev->flags & IFF_UP)
4391 call_netdevice_notifiers(NETDEV_UP, dev);
4392 else
4393 call_netdevice_notifiers(NETDEV_DOWN, dev);
4394 }
4395
4396 if (dev->flags & IFF_UP &&
4397 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4398 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4399}
4400
4401/**
4402 * dev_change_flags - change device settings
4403 * @dev: device
4404 * @flags: device state flags
4405 *
4406 * Change settings on a device based on state flags. The flags are
4407 * in the format exported to userspace.
4408 */
4409int dev_change_flags(struct net_device *dev, unsigned flags)
4410{
4411 int ret, changes;
4412 int old_flags = dev->flags;
4413
4414 ret = __dev_change_flags(dev, flags);
4415 if (ret < 0)
4416 return ret;
4417
4418 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07004419 if (changes)
4420 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421
Patrick McHardybd380812010-02-26 06:34:53 +00004422 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004423 return ret;
4424}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004425EXPORT_SYMBOL(dev_change_flags);
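
/*
 * Illustrative sketch (not part of the original file): switching a
 * device into promiscuous mode through the same path the SIOCSIFFLAGS
 * ioctl uses.  __dev_change_flags() asserts the RTNL, so take it.
 */
static int example_enable_promisc(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_PROMISC);
	rtnl_unlock();
	return err;
}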
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004427/**
4428 * dev_set_mtu - Change maximum transfer unit
4429 * @dev: device
4430 * @new_mtu: new transfer unit
4431 *
4432 * Change the maximum transfer size of the network device.
4433 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434int dev_set_mtu(struct net_device *dev, int new_mtu)
4435{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004436 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437 int err;
4438
4439 if (new_mtu == dev->mtu)
4440 return 0;
4441
4442 /* MTU must not be negative. */
4443 if (new_mtu < 0)
4444 return -EINVAL;
4445
4446 if (!netif_device_present(dev))
4447 return -ENODEV;
4448
4449 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004450 if (ops->ndo_change_mtu)
4451 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004452 else
4453 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004454
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004456 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457 return err;
4458}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004459EXPORT_SYMBOL(dev_set_mtu);
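
/*
 * Illustrative sketch (not part of the original file): raising a
 * device to a jumbo-frame MTU under the RTNL, as the SIOCSIFMTU ioctl
 * path below does.  9000 is just a conventional jumbo value.
 */
static int example_set_jumbo_mtu(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	return err;
}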
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004461/**
4462 * dev_set_mac_address - Change Media Access Control Address
4463 * @dev: device
4464 * @sa: new address
4465 *
4466 * Change the hardware (MAC) address of the device
4467 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4469{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004470 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471 int err;
4472
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004473 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004474 return -EOPNOTSUPP;
4475 if (sa->sa_family != dev->type)
4476 return -EINVAL;
4477 if (!netif_device_present(dev))
4478 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004479 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004480 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004481 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004482 return err;
4483}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004484EXPORT_SYMBOL(dev_set_mac_address);
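
/*
 * Illustrative sketch (not part of the original file): building the
 * sockaddr that dev_set_mac_address() expects.  sa_family must match
 * dev->type; the locally administered MAC below is made up.
 */
static int example_set_mac(struct net_device *dev)
{
	static const unsigned char mac[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x12, 0x34, 0x56
	};
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, ETH_ALEN);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}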
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485
4486/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00004487 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004488 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004489static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490{
4491 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00004492 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493
4494 if (!dev)
4495 return -ENODEV;
4496
4497 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004498 case SIOCGIFFLAGS: /* Get interface flags */
4499 ifr->ifr_flags = (short) dev_get_flags(dev);
4500 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004502 case SIOCGIFMETRIC: /* Get the metric on the interface
4503 (currently unused) */
4504 ifr->ifr_metric = 0;
4505 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004507 case SIOCGIFMTU: /* Get the MTU of a device */
4508 ifr->ifr_mtu = dev->mtu;
4509 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004511 case SIOCGIFHWADDR:
4512 if (!dev->addr_len)
4513 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4514 else
4515 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4516 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4517 ifr->ifr_hwaddr.sa_family = dev->type;
4518 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004520 case SIOCGIFSLAVE:
4521 err = -EINVAL;
4522 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004523
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004524 case SIOCGIFMAP:
4525 ifr->ifr_map.mem_start = dev->mem_start;
4526 ifr->ifr_map.mem_end = dev->mem_end;
4527 ifr->ifr_map.base_addr = dev->base_addr;
4528 ifr->ifr_map.irq = dev->irq;
4529 ifr->ifr_map.dma = dev->dma;
4530 ifr->ifr_map.port = dev->if_port;
4531 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004532
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004533 case SIOCGIFINDEX:
4534 ifr->ifr_ifindex = dev->ifindex;
4535 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004536
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004537 case SIOCGIFTXQLEN:
4538 ifr->ifr_qlen = dev->tx_queue_len;
4539 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004540
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004541 default:
4542 /* dev_ioctl() should ensure this case
4543 * is never reached
4544 */
4545 WARN_ON(1);
4546 err = -EINVAL;
4547 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004548
4549 }
4550 return err;
4551}
4552
4553/*
4554 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4555 */
4556static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4557{
4558 int err;
4559 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004560 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004561
4562 if (!dev)
4563 return -ENODEV;
4564
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004565 ops = dev->netdev_ops;
4566
Jeff Garzik14e3e072007-10-08 00:06:32 -07004567 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004568 case SIOCSIFFLAGS: /* Set interface flags */
4569 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004570
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004571 case SIOCSIFMETRIC: /* Set the metric on the interface
4572 (currently unused) */
4573 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004574
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004575 case SIOCSIFMTU: /* Set the MTU of a device */
4576 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004577
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004578 case SIOCSIFHWADDR:
4579 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004581 case SIOCSIFHWBROADCAST:
4582 if (ifr->ifr_hwaddr.sa_family != dev->type)
4583 return -EINVAL;
4584 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4585 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4586 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4587 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004588
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004589 case SIOCSIFMAP:
4590 if (ops->ndo_set_config) {
4591 if (!netif_device_present(dev))
4592 return -ENODEV;
4593 return ops->ndo_set_config(dev, &ifr->ifr_map);
4594 }
4595 return -EOPNOTSUPP;
4596
4597 case SIOCADDMULTI:
4598 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4599 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4600 return -EINVAL;
4601 if (!netif_device_present(dev))
4602 return -ENODEV;
4603 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4604 dev->addr_len, 1);
4605
4606 case SIOCDELMULTI:
4607 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4608 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4609 return -EINVAL;
4610 if (!netif_device_present(dev))
4611 return -ENODEV;
4612 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4613 dev->addr_len, 1);
4614
4615 case SIOCSIFTXQLEN:
4616 if (ifr->ifr_qlen < 0)
4617 return -EINVAL;
4618 dev->tx_queue_len = ifr->ifr_qlen;
4619 return 0;
4620
4621 case SIOCSIFNAME:
4622 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4623 return dev_change_name(dev, ifr->ifr_newname);
4624
4625 /*
4626 * Unknown or private ioctl
4627 */
4628 default:
4629 if ((cmd >= SIOCDEVPRIVATE &&
4630 cmd <= SIOCDEVPRIVATE + 15) ||
4631 cmd == SIOCBONDENSLAVE ||
4632 cmd == SIOCBONDRELEASE ||
4633 cmd == SIOCBONDSETHWADDR ||
4634 cmd == SIOCBONDSLAVEINFOQUERY ||
4635 cmd == SIOCBONDINFOQUERY ||
4636 cmd == SIOCBONDCHANGEACTIVE ||
4637 cmd == SIOCGMIIPHY ||
4638 cmd == SIOCGMIIREG ||
4639 cmd == SIOCSMIIREG ||
4640 cmd == SIOCBRADDIF ||
4641 cmd == SIOCBRDELIF ||
4642 cmd == SIOCSHWTSTAMP ||
4643 cmd == SIOCWANDEV) {
4644 err = -EOPNOTSUPP;
4645 if (ops->ndo_do_ioctl) {
4646 if (netif_device_present(dev))
4647 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4648 else
4649 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004650 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004651 } else
4652 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653
4654 }
4655 return err;
4656}
4657
4658/*
4659 * This function handles all "interface"-type I/O control requests. The actual
4660 * 'doing' part of this is dev_ifsioc above.
4661 */
4662
4663/**
4664 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004665 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004666 * @cmd: command to issue
4667 * @arg: pointer to a struct ifreq in user space
4668 *
4669 * Issue ioctl functions to devices. This is normally called by the
4670 * user space syscall interfaces but can sometimes be useful for
4671 * other purposes. The return value is the return from the syscall if
4672 * positive or a negative errno code on error.
4673 */
4674
Eric W. Biederman881d9662007-09-17 11:56:21 -07004675int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676{
4677 struct ifreq ifr;
4678 int ret;
4679 char *colon;
4680
4681 /* One special case: SIOCGIFCONF takes an ifconf argument
4682 and must be serialized by the RTNL, because it sleeps
4683 writing to user space.
4684 */
4685
4686 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004687 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004688 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004689 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690 return ret;
4691 }
4692 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004693 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694
4695 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4696 return -EFAULT;
4697
4698 ifr.ifr_name[IFNAMSIZ-1] = 0;
4699
4700 colon = strchr(ifr.ifr_name, ':');
4701 if (colon)
4702 *colon = 0;
4703
4704 /*
4705 * See which interface the caller is talking about.
4706 */
4707
4708 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004709 /*
4710 * These ioctl calls:
4711 * - can be done by all.
4712 * - atomic and do not require locking.
4713 * - return a value
4714 */
4715 case SIOCGIFFLAGS:
4716 case SIOCGIFMETRIC:
4717 case SIOCGIFMTU:
4718 case SIOCGIFHWADDR:
4719 case SIOCGIFSLAVE:
4720 case SIOCGIFMAP:
4721 case SIOCGIFINDEX:
4722 case SIOCGIFTXQLEN:
4723 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004724 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004725 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004726 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004727 if (!ret) {
4728 if (colon)
4729 *colon = ':';
4730 if (copy_to_user(arg, &ifr,
4731 sizeof(struct ifreq)))
4732 ret = -EFAULT;
4733 }
4734 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004735
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004736 case SIOCETHTOOL:
4737 dev_load(net, ifr.ifr_name);
4738 rtnl_lock();
4739 ret = dev_ethtool(net, &ifr);
4740 rtnl_unlock();
4741 if (!ret) {
4742 if (colon)
4743 *colon = ':';
4744 if (copy_to_user(arg, &ifr,
4745 sizeof(struct ifreq)))
4746 ret = -EFAULT;
4747 }
4748 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004750 /*
4751 * These ioctl calls:
4752 * - require superuser power.
4753 * - require strict serialization.
4754 * - return a value
4755 */
4756 case SIOCGMIIPHY:
4757 case SIOCGMIIREG:
4758 case SIOCSIFNAME:
4759 if (!capable(CAP_NET_ADMIN))
4760 return -EPERM;
4761 dev_load(net, ifr.ifr_name);
4762 rtnl_lock();
4763 ret = dev_ifsioc(net, &ifr, cmd);
4764 rtnl_unlock();
4765 if (!ret) {
4766 if (colon)
4767 *colon = ':';
4768 if (copy_to_user(arg, &ifr,
4769 sizeof(struct ifreq)))
4770 ret = -EFAULT;
4771 }
4772 return ret;
4773
4774 /*
4775 * These ioctl calls:
4776 * - require superuser power.
4777 * - require strict serialization.
4778 * - do not return a value
4779 */
4780 case SIOCSIFFLAGS:
4781 case SIOCSIFMETRIC:
4782 case SIOCSIFMTU:
4783 case SIOCSIFMAP:
4784 case SIOCSIFHWADDR:
4785 case SIOCSIFSLAVE:
4786 case SIOCADDMULTI:
4787 case SIOCDELMULTI:
4788 case SIOCSIFHWBROADCAST:
4789 case SIOCSIFTXQLEN:
4790 case SIOCSMIIREG:
4791 case SIOCBONDENSLAVE:
4792 case SIOCBONDRELEASE:
4793 case SIOCBONDSETHWADDR:
4794 case SIOCBONDCHANGEACTIVE:
4795 case SIOCBRADDIF:
4796 case SIOCBRDELIF:
4797 case SIOCSHWTSTAMP:
4798 if (!capable(CAP_NET_ADMIN))
4799 return -EPERM;
4800 /* fall through */
4801 case SIOCBONDSLAVEINFOQUERY:
4802 case SIOCBONDINFOQUERY:
4803 dev_load(net, ifr.ifr_name);
4804 rtnl_lock();
4805 ret = dev_ifsioc(net, &ifr, cmd);
4806 rtnl_unlock();
4807 return ret;
4808
4809 case SIOCGIFMEM:
4810 /* Get the per device memory space. We can add this but
4811 * currently do not support it */
4812 case SIOCSIFMEM:
4813 /* Set the per device memory buffer space.
4814 * Not applicable in our case */
4815 case SIOCSIFLINK:
4816 return -EINVAL;
4817
4818 /*
4819 * Unknown or private ioctl.
4820 */
4821 default:
4822 if (cmd == SIOCWANDEV ||
4823 (cmd >= SIOCDEVPRIVATE &&
4824 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004825 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004826 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004827 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004828 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004829 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004830 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004831 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004832 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004833 }
4834 /* Take care of Wireless Extensions */
4835 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4836 return wext_handle_ioctl(net, &ifr, cmd, arg);
4837 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004838 }
4839}
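
/*
 * Illustrative sketch (not part of the original file): the user-space
 * side of the SIOCGIFMTU path handled above, kept under #if 0 since
 * it is not kernel code.  "eth0" is an example interface name.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("%s mtu = %d\n", ifr.ifr_name, ifr.ifr_mtu);
	close(fd);
	return 0;
}
#endif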
4840
4841
4842/**
4843 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004844 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004845 *
4846 * Returns a suitable unique value for a new device interface
4847 * number. The caller must hold the rtnl semaphore or the
4848 * dev_base_lock to be sure it remains unique.
4849 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004850static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004851{
4852 static int ifindex;
4853 for (;;) {
4854 if (++ifindex <= 0)
4855 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004856 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004857 return ifindex;
4858 }
4859}
4860
Linus Torvalds1da177e2005-04-16 15:20:36 -07004861/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004862static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004864static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004865{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004866 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004867}
4868
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004869static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004870{
Krishna Kumare93737b2009-12-08 22:26:02 +00004871 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004872
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004873 BUG_ON(dev_boot_phase);
4874 ASSERT_RTNL();
4875
Krishna Kumare93737b2009-12-08 22:26:02 +00004876 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004877 /* Some devices call unregister without ever having been
Krishna Kumare93737b2009-12-08 22:26:02 +00004878 * registered, to unwind a failed initialization. Remove
4879 * those devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004880 */
4881 if (dev->reg_state == NETREG_UNINITIALIZED) {
4882 pr_debug("unregister_netdevice: device %s/%p never "
4883 "was registered\n", dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004884
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004885 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00004886 list_del(&dev->unreg_list);
4887 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004888 }
4889
4890 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4891
4892 /* If device is running, close it first. */
4893 dev_close(dev);
4894
4895 /* And unlink it from device chain. */
4896 unlist_netdevice(dev);
4897
4898 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004899 }
4900
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004901 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004902
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004903 list_for_each_entry(dev, head, unreg_list) {
4904 /* Shutdown queueing discipline. */
4905 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004906
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004907
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004908 /* Notify protocols that we are about to destroy
4909 this device. They should clean up all their state.
4910 */
4911 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4912
Patrick McHardya2835762010-02-26 06:34:51 +00004913 if (!dev->rtnl_link_ops ||
4914 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4915 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4916
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004917 /*
4918 * Flush the unicast and multicast chains
4919 */
4920 dev_unicast_flush(dev);
4921 dev_addr_discard(dev);
4922
4923 if (dev->netdev_ops->ndo_uninit)
4924 dev->netdev_ops->ndo_uninit(dev);
4925
4926 /* Notifier chain MUST detach us from master device. */
4927 WARN_ON(dev->master);
4928
4929 /* Remove entries from kobject tree */
4930 netdev_unregister_kobject(dev);
4931 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004932
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004933 /* Process any work delayed until the end of the batch */
stephen hemmingere5e26d72010-02-24 14:01:38 +00004934 dev = list_first_entry(head, struct net_device, unreg_list);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004935 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4936
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004937 synchronize_net();
4938
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004939 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004940 dev_put(dev);
4941}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004942
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004943static void rollback_registered(struct net_device *dev)
4944{
4945 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004946
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004947 list_add(&dev->unreg_list, &single);
4948 rollback_registered_many(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004949}
4950
David S. Millere8a04642008-07-17 00:34:19 -07004951static void __netdev_init_queue_locks_one(struct net_device *dev,
4952 struct netdev_queue *dev_queue,
4953 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004954{
4955 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004956 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004957 dev_queue->xmit_lock_owner = -1;
4958}
4959
4960static void netdev_init_queue_locks(struct net_device *dev)
4961{
David S. Millere8a04642008-07-17 00:34:19 -07004962 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4963 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004964}
4965
Herbert Xub63365a2008-10-23 01:11:29 -07004966unsigned long netdev_fix_features(unsigned long features, const char *name)
4967{
4968 /* Fix illegal SG+CSUM combinations. */
4969 if ((features & NETIF_F_SG) &&
4970 !(features & NETIF_F_ALL_CSUM)) {
4971 if (name)
4972 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4973 "checksum feature.\n", name);
4974 features &= ~NETIF_F_SG;
4975 }
4976
4977 /* TSO requires that SG is present as well. */
4978 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4979 if (name)
4980 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4981 "SG feature.\n", name);
4982 features &= ~NETIF_F_TSO;
4983 }
4984
4985 if (features & NETIF_F_UFO) {
4986 if (!(features & NETIF_F_GEN_CSUM)) {
4987 if (name)
4988 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4989 "since no NETIF_F_HW_CSUM feature.\n",
4990 name);
4991 features &= ~NETIF_F_UFO;
4992 }
4993
4994 if (!(features & NETIF_F_SG)) {
4995 if (name)
4996 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4997 "since no NETIF_F_SG feature.\n", name);
4998 features &= ~NETIF_F_UFO;
4999 }
5000 }
5001
5002 return features;
5003}
5004EXPORT_SYMBOL(netdev_fix_features);
5005
Linus Torvalds1da177e2005-04-16 15:20:36 -07005006/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005007 * netif_stacked_transfer_operstate - transfer operstate
5008 * @rootdev: the root or lower level device to transfer state from
5009 * @dev: the device to transfer operstate to
5010 *
5011 * Transfer operational state from root to device. This is normally
5012 * called when a stacking relationship exists between the root
5013 * device and the device (a leaf device).
5014 */
5015void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5016 struct net_device *dev)
5017{
5018 if (rootdev->operstate == IF_OPER_DORMANT)
5019 netif_dormant_on(dev);
5020 else
5021 netif_dormant_off(dev);
5022
5023 if (netif_carrier_ok(rootdev)) {
5024 if (!netif_carrier_ok(dev))
5025 netif_carrier_on(dev);
5026 } else {
5027 if (netif_carrier_ok(dev))
5028 netif_carrier_off(dev);
5029 }
5030}
5031EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5032
5033/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005034 * register_netdevice - register a network device
5035 * @dev: device to register
5036 *
5037 * Take a completed network device structure and add it to the kernel
5038 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5039 * chain. 0 is returned on success. A negative errno code is returned
5040 * on a failure to set up the device, or if the name is a duplicate.
5041 *
5042 * Callers must hold the rtnl semaphore. You may want
5043 * register_netdev() instead of this.
5044 *
5045 * BUGS:
5046 * The locking appears insufficient to guarantee two parallel registers
5047 * will not get the same name.
5048 */
5049
5050int register_netdevice(struct net_device *dev)
5051{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005052 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005053 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005054
5055 BUG_ON(dev_boot_phase);
5056 ASSERT_RTNL();
5057
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005058 might_sleep();
5059
Linus Torvalds1da177e2005-04-16 15:20:36 -07005060 /* When net_device's are persistent, this will be fatal. */
5061 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005062 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063
David S. Millerf1f28aa2008-07-15 00:08:33 -07005064 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005065 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07005066 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068 dev->iflink = -1;
5069
5070 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005071 if (dev->netdev_ops->ndo_init) {
5072 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005073 if (ret) {
5074 if (ret > 0)
5075 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005076 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005077 }
5078 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005079
Octavian Purdilad9031022009-11-18 02:36:59 +00005080 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
5081 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005082 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005083
Eric W. Biederman881d9662007-09-17 11:56:21 -07005084 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005085 if (dev->iflink == -1)
5086 dev->iflink = dev->ifindex;
5087
Stephen Hemmingerd212f872007-06-27 00:47:37 -07005088 /* Fix illegal checksum combinations */
5089 if ((dev->features & NETIF_F_HW_CSUM) &&
5090 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5091 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5092 dev->name);
5093 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5094 }
5095
5096 if ((dev->features & NETIF_F_NO_CSUM) &&
5097 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5098 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5099 dev->name);
5100 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5101 }
5102
Herbert Xub63365a2008-10-23 01:11:29 -07005103 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005104
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07005105 /* Enable software GSO if SG is supported. */
5106 if (dev->features & NETIF_F_SG)
5107 dev->features |= NETIF_F_GSO;
5108
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005109 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005110
5111 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5112 ret = notifier_to_errno(ret);
5113 if (ret)
5114 goto err_uninit;
5115
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005116 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005117 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005118 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005119 dev->reg_state = NETREG_REGISTERED;
5120
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121 /*
5122 * Default initial state at registry is that the
5123 * device is present.
5124 */
5125
5126 set_bit(__LINK_STATE_PRESENT, &dev->state);
5127
Linus Torvalds1da177e2005-04-16 15:20:36 -07005128 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005129 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005130 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005131
5132 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005133 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005134 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005135 if (ret) {
5136 rollback_registered(dev);
5137 dev->reg_state = NETREG_UNREGISTERED;
5138 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005139 /*
5140 * Prevent userspace races by waiting until the network
5141 * device is fully setup before sending notifications.
5142 */
Patrick McHardya2835762010-02-26 06:34:51 +00005143 if (!dev->rtnl_link_ops ||
5144 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5145 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146
5147out:
5148 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005149
5150err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005151 if (dev->netdev_ops->ndo_uninit)
5152 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005153 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005155EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005156
5157/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005158 * init_dummy_netdev - init a dummy network device for NAPI
5159 * @dev: device to init
5160 *
5161 * This takes a network device structure and initializes the minimum
5162 * number of fields so it can be used to schedule NAPI polls without
5163 * registering a full blown interface. This is to be used by drivers
5164 * that need to tie several hardware interfaces to a single NAPI
5165 * poll scheduler due to HW limitations.
5166 */
5167int init_dummy_netdev(struct net_device *dev)
5168{
5169 /* Clear everything. Note we don't initialize spinlocks
5170 * as they aren't supposed to be taken by any of the
5171 * NAPI code and this dummy netdev is supposed to be
5172 * only ever used for NAPI polls
5173 */
5174 memset(dev, 0, sizeof(struct net_device));
5175
5176 /* make sure we BUG if trying to hit standard
5177 * register/unregister code path
5178 */
5179 dev->reg_state = NETREG_DUMMY;
5180
5181 /* initialize the ref count */
5182 atomic_set(&dev->refcnt, 1);
5183
5184 /* NAPI wants this */
5185 INIT_LIST_HEAD(&dev->napi_list);
5186
5187 /* a dummy interface is started by default */
5188 set_bit(__LINK_STATE_PRESENT, &dev->state);
5189 set_bit(__LINK_STATE_START, &dev->state);
5190
5191 return 0;
5192}
5193EXPORT_SYMBOL_GPL(init_dummy_netdev);
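
/*
 * Illustrative sketch (not part of the original file): a driver tying
 * one NAPI context to a dummy netdev, as the kerneldoc above
 * describes.  "example_hw" and its fields are hypothetical.
 */
struct example_hw {
	struct net_device napi_dev;	/* never registered */
	struct napi_struct napi;
};

static int example_hw_init(struct example_hw *hw,
			   int (*poll)(struct napi_struct *, int))
{
	int err = init_dummy_netdev(&hw->napi_dev);

	if (err)
		return err;
	netif_napi_add(&hw->napi_dev, &hw->napi, poll, 64);
	return 0;
}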
5194
5195
5196/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005197 * register_netdev - register a network device
5198 * @dev: device to register
5199 *
5200 * Take a completed network device structure and add it to the kernel
5201 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5202 * chain. 0 is returned on success. A negative errno code is returned
5203 * on a failure to set up the device, or if the name is a duplicate.
5204 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005205 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206 * and expands the device name if you passed a format string to
5207 * alloc_netdev.
5208 */
5209int register_netdev(struct net_device *dev)
5210{
5211 int err;
5212
5213 rtnl_lock();
5214
5215 /*
5216 * If the name is a format string the caller wants us to do a
5217 * name allocation.
5218 */
5219 if (strchr(dev->name, '%')) {
5220 err = dev_alloc_name(dev, dev->name);
5221 if (err < 0)
5222 goto out;
5223 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005224
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225 err = register_netdevice(dev);
5226out:
5227 rtnl_unlock();
5228 return err;
5229}
5230EXPORT_SYMBOL(register_netdev);
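
/*
 * Illustrative sketch (not part of the original file): the usual
 * allocate/register/unregister/free lifecycle built from the helpers
 * in this file.  ether_setup() comes from <linux/etherdevice.h>;
 * "example_port" and the "exm%d" template are hypothetical.
 */
struct example_port {
	int id;
};

static struct net_device *example_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct example_port), "exm%d",
			   ether_setup);
	if (!dev)
		return NULL;
	if (register_netdev(dev)) {	/* takes the RTNL, expands "%d" */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}

static void example_destroy(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes the RTNL */
	free_netdev(dev);
}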
5231
5232/*
5233 * netdev_wait_allrefs - wait until all references are gone.
5234 *
5235 * This is called when unregistering network devices.
5236 *
5237 * Any protocol or device that holds a reference should register
5238 * for netdevice notification, and cleanup and put back the
5239 * reference if they receive an UNREGISTER event.
5240 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005241 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242 */
5243static void netdev_wait_allrefs(struct net_device *dev)
5244{
5245 unsigned long rebroadcast_time, warning_time;
5246
Eric Dumazete014deb2009-11-17 05:59:21 +00005247 linkwatch_forget_dev(dev);
5248
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249 rebroadcast_time = warning_time = jiffies;
5250 while (atomic_read(&dev->refcnt) != 0) {
5251 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005252 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253
5254 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005255 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005256 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
Octavian Purdila395264d2009-11-16 13:49:35 +00005257 * should have already handled it the first time */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258
5259 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5260 &dev->state)) {
5261 /* We must not have linkwatch events
5262 * pending on unregister. If this
5263 * happens, we simply run the queue
5264 * unscheduled, resulting in a noop
5265 * for this device.
5266 */
5267 linkwatch_run_queue();
5268 }
5269
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005270 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271
5272 rebroadcast_time = jiffies;
5273 }
5274
5275 msleep(250);
5276
5277 if (time_after(jiffies, warning_time + 10 * HZ)) {
5278 printk(KERN_EMERG "unregister_netdevice: "
5279 "waiting for %s to become free. Usage "
5280 "count = %d\n",
5281 dev->name, atomic_read(&dev->refcnt));
5282 warning_time = jiffies;
5283 }
5284 }
5285}
5286
5287/* The sequence is:
5288 *
5289 * rtnl_lock();
5290 * ...
5291 * register_netdevice(x1);
5292 * register_netdevice(x2);
5293 * ...
5294 * unregister_netdevice(y1);
5295 * unregister_netdevice(y2);
5296 * ...
5297 * rtnl_unlock();
5298 * free_netdev(y1);
5299 * free_netdev(y2);
5300 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005301 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005303 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304 * without deadlocking with linkwatch via keventd.
5305 * 2) Since we run with the RTNL semaphore not held, we can sleep
5306 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005307 *
5308 * We must not return until all unregister events added during
5309 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005310 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005311void netdev_run_todo(void)
5312{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005313 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005314
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005316 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005317
5318 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005319
Linus Torvalds1da177e2005-04-16 15:20:36 -07005320 while (!list_empty(&list)) {
5321 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00005322 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005323 list_del(&dev->todo_list);
5324
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005325 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005326 printk(KERN_ERR "network todo '%s' but state %d\n",
5327 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005328 dump_stack();
5329 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005330 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005331
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005332 dev->reg_state = NETREG_UNREGISTERED;
5333
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005334 on_each_cpu(flush_backlog, dev, 1);
5335
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005336 netdev_wait_allrefs(dev);
5337
5338 /* paranoia */
5339 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005340 WARN_ON(dev->ip_ptr);
5341 WARN_ON(dev->ip6_ptr);
5342 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005343
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005344 if (dev->destructor)
5345 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005346
5347 /* Free network device */
5348 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005349 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005350}
5351
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005352/**
Eric Dumazetd83345a2009-11-16 03:36:51 +00005353 * dev_txq_stats_fold - fold tx_queues stats
5354 * @dev: device to get statistics from
5355 * @stats: struct net_device_stats to hold results
5356 */
5357void dev_txq_stats_fold(const struct net_device *dev,
5358 struct net_device_stats *stats)
5359{
5360 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5361 unsigned int i;
5362 struct netdev_queue *txq;
5363
5364 for (i = 0; i < dev->num_tx_queues; i++) {
5365 txq = netdev_get_tx_queue(dev, i);
5366 tx_bytes += txq->tx_bytes;
5367 tx_packets += txq->tx_packets;
5368 tx_dropped += txq->tx_dropped;
5369 }
5370 if (tx_bytes || tx_packets || tx_dropped) {
5371 stats->tx_bytes = tx_bytes;
5372 stats->tx_packets = tx_packets;
5373 stats->tx_dropped = tx_dropped;
5374 }
5375}
5376EXPORT_SYMBOL(dev_txq_stats_fold);
5377
5378/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005379 * dev_get_stats - get network device statistics
5380 * @dev: device to get statistics from
5381 *
5382 * Get network statistics from device. The device driver may provide
5383 * its own method by setting dev->netdev_ops->get_stats; otherwise
5384 * the internal statistics structure is used.
5385 */
5386const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005387{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005388 const struct net_device_ops *ops = dev->netdev_ops;
5389
5390 if (ops->ndo_get_stats)
5391 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005392
Eric Dumazetd83345a2009-11-16 03:36:51 +00005393 dev_txq_stats_fold(dev, &dev->stats);
5394 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07005395}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005396EXPORT_SYMBOL(dev_get_stats);
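
/*
 * Illustrative sketch (not part of the original file): reading the
 * aggregate counters through dev_get_stats(), which picks either the
 * driver's ndo_get_stats or the folded queue stats above.
 */
static void example_log_tx_stats(struct net_device *dev)
{
	const struct net_device_stats *stats = dev_get_stats(dev);

	printk(KERN_DEBUG "%s: %lu packets / %lu bytes transmitted\n",
	       dev->name, stats->tx_packets, stats->tx_bytes);
}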
Rusty Russellc45d2862007-03-28 14:29:08 -07005397
David S. Millerdc2b4842008-07-08 17:18:23 -07005398static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005399 struct netdev_queue *queue,
5400 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005401{
David S. Millerdc2b4842008-07-08 17:18:23 -07005402 queue->dev = dev;
5403}
5404
David S. Millerbb949fb2008-07-08 16:55:56 -07005405static void netdev_init_queues(struct net_device *dev)
5406{
David S. Millere8a04642008-07-17 00:34:19 -07005407 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5408 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005409 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005410}
5411
Linus Torvalds1da177e2005-04-16 15:20:36 -07005412/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005413 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005414 * @sizeof_priv: size of private data to allocate space for
5415 * @name: device name format string
5416 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005417 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005418 *
5419 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005420 * and performs basic initialization.  Also allocates subqueue structs
5421 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005422 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005423struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5424 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425{
David S. Millere8a04642008-07-17 00:34:19 -07005426 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005427 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005428 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005429 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005430
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005431 BUG_ON(strlen(name) >= sizeof(dev->name));
5432
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005433 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005434 if (sizeof_priv) {
5435 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005436 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005437 alloc_size += sizeof_priv;
5438 }
5439 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005440 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005441
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005442 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005443 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005444 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445 return NULL;
5446 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005447
Stephen Hemminger79439862008-07-21 13:28:44 -07005448 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005449 if (!tx) {
5450 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5451 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005452 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005453 }
5454
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005455 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005456 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005457
5458 if (dev_addr_init(dev))
5459 goto free_tx;
5460
Jiri Pirkoccffad252009-05-22 23:22:17 +00005461 dev_unicast_init(dev);
5462
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005463 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005464
David S. Millere8a04642008-07-17 00:34:19 -07005465 dev->_tx = tx;
5466 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005467 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005468
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005469 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470
David S. Millerbb949fb2008-07-08 16:55:56 -07005471 netdev_init_queues(dev);
5472
Peter P Waskiewicz Jr15682bc2010-02-10 20:03:05 -08005473 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5474 dev->ethtool_ntuple_list.count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08005475 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005476 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00005477 INIT_LIST_HEAD(&dev->link_watch_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005478 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005479 setup(dev);
5480 strcpy(dev->name, name);
5481 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005482
5483free_tx:
5484 kfree(tx);
5485
5486free_p:
5487 kfree(p);
5488 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005489}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005490EXPORT_SYMBOL(alloc_netdev_mq);
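/*
 * Illustrative sketch (not part of this file): how a driver might call
 * alloc_netdev_mq().  "struct my_priv", my_setup() and the queue count
 * of 4 are hypothetical placeholders for a real driver's choices.
 *
 *	static void my_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *	}
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d",
 *			      my_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */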
Linus Torvalds1da177e2005-04-16 15:20:36 -07005491
5492/**
5493 * free_netdev - free network device
5494 * @dev: device
5495 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005496 * This function does the last stage of destroying an allocated device
5497 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005498 * If this is the last reference then it will be freed.
5499 */
5500void free_netdev(struct net_device *dev)
5501{
Herbert Xud565b0a2008-12-15 23:38:52 -08005502 struct napi_struct *p, *n;
5503
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005504 release_net(dev_net(dev));
5505
David S. Millere8a04642008-07-17 00:34:19 -07005506 kfree(dev->_tx);
5507
Jiri Pirkof001fde2009-05-05 02:48:28 +00005508 /* Flush device addresses */
5509 dev_addr_flush(dev);
5510
Peter P Waskiewicz Jr15682bc2010-02-10 20:03:05 -08005511 /* Clear ethtool n-tuple list */
5512 ethtool_ntuple_flush(dev);
5513
Herbert Xud565b0a2008-12-15 23:38:52 -08005514 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5515 netif_napi_del(p);
5516
Stephen Hemminger3041a062006-05-26 13:25:24 -07005517 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005518 if (dev->reg_state == NETREG_UNINITIALIZED) {
5519 kfree((char *)dev - dev->padded);
5520 return;
5521 }
5522
5523 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5524 dev->reg_state = NETREG_RELEASED;
5525
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005526 /* will free via device release */
5527 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005528}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005529EXPORT_SYMBOL(free_netdev);
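/*
 * Illustrative sketch: free_netdev() serves both the error path of a
 * probe routine (reg_state still NETREG_UNINITIALIZED, freed at once)
 * and normal teardown after unregistration.  "my_dev" is hypothetical.
 *
 *	unregister_netdev(my_dev);
 *	free_netdev(my_dev);
 */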
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005530
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005531/**
5532 * synchronize_net - Synchronize with packet receive processing
5533 *
5534 * Wait for packets currently being received to be done.
5535 * Does not block later packets from starting.
5536 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005537void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005538{
5539 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005540 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005541}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005542EXPORT_SYMBOL(synchronize_net);
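/*
 * Illustrative sketch: a protocol module might call synchronize_net()
 * after unhooking its receive handler, so that in-flight packets drain
 * before shared state is freed.  "my_packet_type" and "my_state" are
 * hypothetical.
 *
 *	dev_remove_pack(&my_packet_type);
 *	synchronize_net();
 *	kfree(my_state);
 */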
Linus Torvalds1da177e2005-04-16 15:20:36 -07005543
5544/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005545 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005546 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005547 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08005548 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005550 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005551 * If @head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005552 *
5553 * Callers must hold the rtnl semaphore. You may want
5554 * unregister_netdev() instead of this.
5555 */
5556
Eric Dumazet44a08732009-10-27 07:03:04 +00005557void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005558{
Herbert Xua6620712007-12-12 19:21:56 -08005559 ASSERT_RTNL();
5560
Eric Dumazet44a08732009-10-27 07:03:04 +00005561 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005562 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005563 } else {
5564 rollback_registered(dev);
5565 /* Finish processing unregister after unlock */
5566 net_set_todo(dev);
5567 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005568}
Eric Dumazet44a08732009-10-27 07:03:04 +00005569EXPORT_SYMBOL(unregister_netdevice_queue);
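/*
 * Illustrative sketch: an rtnl_link_ops ->dellink() implementation
 * would typically forward its caller's list head here, so that several
 * devices can be batched into one unregister pass:
 *
 *	static void my_dellink(struct net_device *dev, struct list_head *head)
 *	{
 *		unregister_netdevice_queue(dev, head);
 *	}
 */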
Linus Torvalds1da177e2005-04-16 15:20:36 -07005570
5571/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005572 * unregister_netdevice_many - unregister many devices
5573 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005574 */
5575void unregister_netdevice_many(struct list_head *head)
5576{
5577 struct net_device *dev;
5578
5579 if (!list_empty(head)) {
5580 rollback_registered_many(head);
5581 list_for_each_entry(dev, head, unreg_list)
5582 net_set_todo(dev);
5583 }
5584}
Eric Dumazet63c80992009-10-27 07:06:49 +00005585EXPORT_SYMBOL(unregister_netdevice_many);
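/*
 * Illustrative sketch: queueing two hypothetical devices and tearing
 * them down in one batch, amortizing the notifier and RCU overhead.
 * Must be called with the rtnl semaphore held.
 *
 *	LIST_HEAD(kill_list);
 *
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 */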
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005586
5587/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005588 * unregister_netdev - remove device from the kernel
5589 * @dev: device
5590 *
5591 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005592 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005593 *
5594 * This is just a wrapper for unregister_netdevice that takes
5595 * the rtnl semaphore. In general you want to use this and not
5596 * unregister_netdevice.
5597 */
5598void unregister_netdev(struct net_device *dev)
5599{
5600 rtnl_lock();
5601 unregister_netdevice(dev);
5602 rtnl_unlock();
5603}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005604EXPORT_SYMBOL(unregister_netdev);
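/*
 * Illustrative sketch: a typical module exit path for a driver that
 * registered a single device ("my_dev" is hypothetical):
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_netdev(my_dev);
 *		free_netdev(my_dev);
 *	}
 */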
5605
Eric W. Biedermance286d32007-09-12 13:53:49 +02005606/**
5607 * dev_change_net_namespace - move device to a different network namespace
5608 * @dev: device
5609 * @net: network namespace
5610 * @pat: If not NULL name pattern to try if the current device name
5611 * is already taken in the destination network namespace.
5612 *
5613 * This function shuts down a device interface and moves it
5614 * to a new network namespace. On success 0 is returned, on
5615 * failure a negative errno code is returned.
5616 *
5617 * Callers must hold the rtnl semaphore.
5618 */
5619
5620int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5621{
Eric W. Biedermance286d32007-09-12 13:53:49 +02005622 int err;
5623
5624 ASSERT_RTNL();
5625
5626 /* Don't allow namespace local devices to be moved. */
5627 err = -EINVAL;
5628 if (dev->features & NETIF_F_NETNS_LOCAL)
5629 goto out;
5630
Eric W. Biederman38918452008-10-27 17:51:47 -07005631#ifdef CONFIG_SYSFS
5632 /* Don't allow real devices to be moved when sysfs
5633 * is enabled.
5634 */
5635 err = -EINVAL;
5636 if (dev->dev.parent)
5637 goto out;
5638#endif
5639
Eric W. Biedermance286d32007-09-12 13:53:49 +02005640	/* Ensure the device has been registered */
5641 err = -EINVAL;
5642 if (dev->reg_state != NETREG_REGISTERED)
5643 goto out;
5644
5645	/* Get out if there is nothing to do */
5646 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005647 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005648 goto out;
5649
5650 /* Pick the destination device name, and ensure
5651 * we can use it in the destination network namespace.
5652 */
5653 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00005654 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005655 /* We get here if we can't use the current device name */
5656 if (!pat)
5657 goto out;
Octavian Purdilad9031022009-11-18 02:36:59 +00005658 if (dev_get_valid_name(net, pat, dev->name, 1))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005659 goto out;
5660 }
5661
5662 /*
5663	 * And now a mini version of register_netdevice and unregister_netdevice.
5664 */
5665
5666 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005667 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005668
5669 /* And unlink it from device chain */
5670 err = -ENODEV;
5671 unlist_netdevice(dev);
5672
5673 synchronize_net();
5674
5675 /* Shutdown queueing discipline. */
5676 dev_shutdown(dev);
5677
5678	/* Notify protocols that we are about to destroy
5679	   this device. They should clean up all their state.
5680 */
5681 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005682 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005683
5684 /*
5685 * Flush the unicast and multicast chains
5686 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00005687 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005688 dev_addr_discard(dev);
5689
Eric W. Biederman38918452008-10-27 17:51:47 -07005690 netdev_unregister_kobject(dev);
5691
Eric W. Biedermance286d32007-09-12 13:53:49 +02005692 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005693 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005694
Eric W. Biedermance286d32007-09-12 13:53:49 +02005695 /* If there is an ifindex conflict assign a new one */
5696 if (__dev_get_by_index(net, dev->ifindex)) {
5697 int iflink = (dev->iflink == dev->ifindex);
5698 dev->ifindex = dev_new_index(net);
5699 if (iflink)
5700 dev->iflink = dev->ifindex;
5701 }
5702
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005703 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005704 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005705 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005706
5707 /* Add the device back in the hashes */
5708 list_netdevice(dev);
5709
5710 /* Notify protocols, that a new device appeared. */
5711 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5712
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005713 /*
5714 * Prevent userspace races by waiting until the network
5715 * device is fully setup before sending notifications.
5716 */
5717 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5718
Eric W. Biedermance286d32007-09-12 13:53:49 +02005719 synchronize_net();
5720 err = 0;
5721out:
5722 return err;
5723}
Johannes Berg463d0182009-07-14 00:33:35 +02005724EXPORT_SYMBOL_GPL(dev_change_net_namespace);
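/*
 * Illustrative sketch: moving a hypothetical device into another
 * namespace under rtnl, with "dev%d" as the fallback name pattern if
 * the current name is already taken in the target namespace:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "dev%d");
 *	rtnl_unlock();
 */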
Eric W. Biedermance286d32007-09-12 13:53:49 +02005725
Linus Torvalds1da177e2005-04-16 15:20:36 -07005726static int dev_cpu_callback(struct notifier_block *nfb,
5727 unsigned long action,
5728 void *ocpu)
5729{
5730 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005731 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005732 struct sk_buff *skb;
5733 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5734 struct softnet_data *sd, *oldsd;
5735
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005736 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005737 return NOTIFY_OK;
5738
5739 local_irq_disable();
5740 cpu = smp_processor_id();
5741 sd = &per_cpu(softnet_data, cpu);
5742 oldsd = &per_cpu(softnet_data, oldcpu);
5743
5744 /* Find end of our completion_queue. */
5745 list_skb = &sd->completion_queue;
5746 while (*list_skb)
5747 list_skb = &(*list_skb)->next;
5748 /* Append completion queue from offline CPU. */
5749 *list_skb = oldsd->completion_queue;
5750 oldsd->completion_queue = NULL;
5751
5752 /* Find end of our output_queue. */
5753 list_net = &sd->output_queue;
5754 while (*list_net)
5755 list_net = &(*list_net)->next_sched;
5756 /* Append output queue from offline CPU. */
5757 *list_net = oldsd->output_queue;
5758 oldsd->output_queue = NULL;
5759
5760 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5761 local_irq_enable();
5762
5763 /* Process offline CPU's input_pkt_queue */
5764 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5765 netif_rx(skb);
5766
5767 return NOTIFY_OK;
5768}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005769
5770
Herbert Xu7f353bf2007-08-10 15:47:58 -07005771/**
Herbert Xub63365a2008-10-23 01:11:29 -07005772 * netdev_increment_features - increment feature set by one
5773 * @all: current feature set
5774 * @one: new feature set
5775 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005776 *
5777 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005778 * @one to the master device with current feature set @all. Will not
5779 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005780 */
Herbert Xub63365a2008-10-23 01:11:29 -07005781unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5782 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005783{
Herbert Xub63365a2008-10-23 01:11:29 -07005784 /* If device needs checksumming, downgrade to it. */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005785 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
Herbert Xub63365a2008-10-23 01:11:29 -07005786 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5787 else if (mask & NETIF_F_ALL_CSUM) {
5788 /* If one device supports v4/v6 checksumming, set for all. */
5789 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5790 !(all & NETIF_F_GEN_CSUM)) {
5791 all &= ~NETIF_F_ALL_CSUM;
5792 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5793 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005794
Herbert Xub63365a2008-10-23 01:11:29 -07005795 /* If one device supports hw checksumming, set for all. */
5796 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5797 all &= ~NETIF_F_ALL_CSUM;
5798 all |= NETIF_F_HW_CSUM;
5799 }
5800 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005801
Herbert Xub63365a2008-10-23 01:11:29 -07005802 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005803
Herbert Xub63365a2008-10-23 01:11:29 -07005804 one |= all & NETIF_F_ONE_FOR_ALL;
Sridhar Samudralad9f59502009-10-07 12:24:25 +00005805 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
Herbert Xub63365a2008-10-23 01:11:29 -07005806 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005807
5808 return all;
5809}
Herbert Xub63365a2008-10-23 01:11:29 -07005810EXPORT_SYMBOL(netdev_increment_features);
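/*
 * Illustrative sketch: a bonding-style master might fold each slave's
 * feature set into its own, in the spirit of bond_compute_features().
 * "slaves", "slave" and "mask" are hypothetical.
 *
 *	unsigned long features = mask & ~NETIF_F_ONE_FOR_ALL;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */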
Herbert Xu7f353bf2007-08-10 15:47:58 -07005811
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005812static struct hlist_head *netdev_create_hash(void)
5813{
5814 int i;
5815 struct hlist_head *hash;
5816
5817 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5818 if (hash != NULL)
5819 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5820 INIT_HLIST_HEAD(&hash[i]);
5821
5822 return hash;
5823}
5824
Eric W. Biederman881d9662007-09-17 11:56:21 -07005825/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005826static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005827{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005828 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005829
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005830 net->dev_name_head = netdev_create_hash();
5831 if (net->dev_name_head == NULL)
5832 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005833
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005834 net->dev_index_head = netdev_create_hash();
5835 if (net->dev_index_head == NULL)
5836 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005837
5838 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005839
5840err_idx:
5841 kfree(net->dev_name_head);
5842err_name:
5843 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005844}
5845
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005846/**
5847 * netdev_drivername - network driver for the device
5848 * @dev: network device
5849 * @buffer: buffer for resulting name
5850 * @len: size of buffer
5851 *
5852 * Determine network driver for device.
5853 * Determine the name of the network driver for the device.
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005854char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005855{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005856 const struct device_driver *driver;
5857 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005858
5859 if (len <= 0 || !buffer)
5860 return buffer;
5861 buffer[0] = 0;
5862
5863 parent = dev->dev.parent;
5864
5865 if (!parent)
5866 return buffer;
5867
5868 driver = parent->driver;
5869 if (driver && driver->name)
5870 strlcpy(buffer, driver->name, len);
5871 return buffer;
5872}
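/*
 * Illustrative sketch: callers pass a small on-stack buffer, much as
 * the transmit watchdog does when reporting a stuck queue:
 *
 *	char drivername[64];
 *
 *	printk(KERN_WARNING "NETDEV WATCHDOG: %s (%s): transmit queue timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */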
5873
Pavel Emelyanov46650792007-10-08 20:38:39 -07005874static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005875{
5876 kfree(net->dev_name_head);
5877 kfree(net->dev_index_head);
5878}
5879
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005880static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005881 .init = netdev_init,
5882 .exit = netdev_exit,
5883};
5884
Pavel Emelyanov46650792007-10-08 20:38:39 -07005885static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005886{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005887 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005888 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005889 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02005890 * initial network namespace
5891 */
5892 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005893 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005894 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005895 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005896
5897		/* Ignore unmovable devices (i.e. loopback) */
5898 if (dev->features & NETIF_F_NETNS_LOCAL)
5899 continue;
5900
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005901 /* Leave virtual devices for the generic cleanup */
5902 if (dev->rtnl_link_ops)
5903 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005904
Eric W. Biedermance286d32007-09-12 13:53:49 +02005905		/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005906 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5907 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005908 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005909 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005910 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005911 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005912 }
5913 }
5914 rtnl_unlock();
5915}
5916
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00005917static void __net_exit default_device_exit_batch(struct list_head *net_list)
5918{
5919	/* At exit all network devices must be removed from a network
5920	 * namespace.  Do this in the reverse order of registration.
5921 * Do this across as many network namespaces as possible to
5922 * improve batching efficiency.
5923 */
5924 struct net_device *dev;
5925 struct net *net;
5926 LIST_HEAD(dev_kill_list);
5927
5928 rtnl_lock();
5929 list_for_each_entry(net, net_list, exit_list) {
5930 for_each_netdev_reverse(net, dev) {
5931 if (dev->rtnl_link_ops)
5932 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
5933 else
5934 unregister_netdevice_queue(dev, &dev_kill_list);
5935 }
5936 }
5937 unregister_netdevice_many(&dev_kill_list);
5938 rtnl_unlock();
5939}
5940
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005941static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005942 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00005943 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02005944};
5945
Linus Torvalds1da177e2005-04-16 15:20:36 -07005946/*
5947 * Initialize the DEV module. At boot time this walks the device list and
5948 * unhooks any devices that fail to initialise (normally hardware not
5949 * present) and leaves us with a valid list of present and active devices.
5950 *
5951 */
5952
5953/*
5954 * This is called single threaded during boot, so no need
5955 * to take the rtnl semaphore.
5956 */
5957static int __init net_dev_init(void)
5958{
5959 int i, rc = -ENOMEM;
5960
5961 BUG_ON(!dev_boot_phase);
5962
Linus Torvalds1da177e2005-04-16 15:20:36 -07005963 if (dev_proc_init())
5964 goto out;
5965
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005966 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005967 goto out;
5968
5969 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08005970 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005971 INIT_LIST_HEAD(&ptype_base[i]);
5972
Eric W. Biederman881d9662007-09-17 11:56:21 -07005973 if (register_pernet_subsys(&netdev_net_ops))
5974 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005975
5976 /*
5977 * Initialise the packet receive queues.
5978 */
5979
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005980 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005981 struct softnet_data *queue;
5982
5983 queue = &per_cpu(softnet_data, i);
5984 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005985 queue->completion_queue = NULL;
5986 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005987
5988 queue->backlog.poll = process_backlog;
5989 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005990 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005991 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005992 }
5993
Linus Torvalds1da177e2005-04-16 15:20:36 -07005994 dev_boot_phase = 0;
5995
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005996	/* The loopback device is special: if any other network device
5997	 * is present in a network namespace, the loopback device must
5998	 * be present too. Since we now dynamically allocate and free the
5999	 * loopback device, ensure this invariant is maintained by
6000	 * keeping the loopback device as the first device on the
6001	 * list of network devices, so that the loopback device
6002	 * is the first device that appears and the last network device
6003	 * that disappears.
6004 */
6005 if (register_pernet_device(&loopback_net_ops))
6006 goto out;
6007
6008 if (register_pernet_device(&default_device_ops))
6009 goto out;
6010
Carlos R. Mafra962cf362008-05-15 11:15:37 -03006011 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6012 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006013
6014 hotcpu_notifier(dev_cpu_callback, 0);
6015 dst_init();
6016 dev_mcast_init();
6017 rc = 0;
6018out:
6019 return rc;
6020}
6021
6022subsys_initcall(net_dev_init);
6023
Krishna Kumare88721f2009-02-18 17:55:02 -08006024static int __init initialize_hashrnd(void)
6025{
6026 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
6027 return 0;
6028}
6029
6030late_initcall_sync(initialize_hashrnd);
6031