/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *              Thomas Bogendoerfer :   Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *              Michael Chastain:       Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell :    SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *      The list of packet types we will receive (as opposed to discard)
 *      and the routines to invoke.
 *
 *      Why 16. Because with 16 the only overlap we get on a hash of the
 *      low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *              0800    IP
 *              8100    802.1Q VLAN
 *              0001    802.3
 *              0002    AX.25
 *              0004    802.2
 *              8035    RARP
 *              0005    SNAP
 *              0805    X.25
 *              0806    ARP
 *              8137    IPX
 *              0009    Localtalk
 *              86DD    IPv6
 */

#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;        /* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);
        return 0;
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
         ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
         ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
         ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
         "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
         "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
         "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if a packet-mangling protocol handler were
 *      first on the list, it could not tell that the packet is cloned
 *      and must be copied-on-write; it would modify the clone and
 *      subsequent readers would see a broken packet.
 *      --ANK (980803)
 */

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep, therefore it cannot guarantee that all
 *      CPUs which are in the middle of receiving packets will see the new
 *      packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        int hash;

        spin_lock_bh(&ptype_lock);
        if (pt->type == htons(ETH_P_ALL))
                list_add_rcu(&pt->list, &ptype_all);
        else {
                hash = ntohs(pt->type) & PTYPE_HASH_MASK;
                list_add_rcu(&pt->list, &ptype_base[hash]);
        }
        spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

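/*
 * Editorial usage sketch (not part of the original file): a minimal
 * ETH_P_ALL tap registered with dev_add_pack().  The identifiers
 * example_rcv and example_ptype are hypothetical.
 *
 *      static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *                             struct packet_type *pt,
 *                             struct net_device *orig_dev)
 *      {
 *              kfree_skb(skb);          -- we own this reference to the skb
 *              return NET_RX_SUCCESS;
 *      }
 *
 *      static struct packet_type example_ptype = {
 *              .type = __constant_htons(ETH_P_ALL),
 *              .func = example_rcv,
 *      };
 *
 *      dev_add_pack(&example_ptype);    -- start tapping all devices
 *      ...
 *      dev_remove_pack(&example_ptype); -- sleeps; afterwards pt may be freed
 */
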
/**
 *      __dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head;
        struct packet_type *pt1;

        spin_lock_bh(&ptype_lock);

        if (pt->type == htons(ETH_P_ALL))
                head = &ptype_all;
        else
                head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
        spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *      dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

                      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *      netdev_boot_setup_add - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine
 *      for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq        = s[i].map.irq;
                        dev->base_addr  = s[i].map.base_addr;
                        dev->mem_start  = s[i].map.mem_start;
                        dev->mem_end    = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *      netdev_boot_base - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves boot-time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
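
/*
 * Editorial note (not part of the original file): as parsed above by
 * get_options(), the accepted command-line format is
 *
 *      netdev=irq,base_addr,mem_start,mem_end,name
 *
 * e.g. "netdev=9,0x300,0,0,eth0".  Leading integer fields may be
 * omitted; whatever string remains after the integers is taken as the
 * device name.
 */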

/*******************************************************************************

                            Device Interface Subroutines

*******************************************************************************/

/**
 *      __dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *      dev_get_by_name_rcu - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name.
 *      If the name is found a pointer to the device is returned.
 *      If the name is not found then %NULL is returned.
 *      The reference counters are not incremented so the caller must be
 *      careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *      dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
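
/*
 * Editorial usage sketch (not part of the original file): the
 * refcounted lookup pattern from process context; the name "eth0" is
 * only an example.
 *
 *      struct net_device *dev = dev_get_by_name(net, "eth0");
 *      if (!dev)
 *              return -ENODEV;
 *      ... use dev; it cannot be freed under us ...
 *      dev_put(dev);   -- drop the reference taken by dev_get_by_name()
 */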

/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *      dev_get_by_index_rcu - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
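
/*
 * Editorial usage sketch (not part of the original file): the lockless
 * pattern the _rcu lookups are meant for.  The device pointer is valid
 * only inside the read-side critical section unless dev_hold() is
 * called on it there.
 *
 *      rcu_read_lock();
 *      dev = dev_get_by_index_rcu(net, ifindex);
 *      if (dev)
 *              ... read dev fields, or dev_hold(dev) to keep it ...
 *      rcu_read_unlock();
 */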


/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns NULL if the device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *      dev_getbyhwaddr - find a device by its hardware address
 *      @net: the applicable net namespace
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns NULL if the device
 *      is not found or a pointer to the device. The caller must hold the
 *      rtnl semaphore. The returned device has not had its ref count
 *      increased and the caller must therefore be careful about locking.
 *
 *      BUGS:
 *      If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
        struct net_device *dev;

        ASSERT_RTNL();

        for_each_netdev(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);
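
/*
 * Editorial usage sketch (not part of the original file): the MAC value
 * below is arbitrary.  RTNL must be held and the result is not
 * refcounted.
 *
 *      static const char mac[ETH_ALEN] = {0x00, 0x16, 0x3e, 0x00, 0x00, 0x01};
 *
 *      ASSERT_RTNL();
 *      dev = dev_getbyhwaddr(net, ARPHRD_ETHER, (char *)mac);
 *      if (dev)
 *              ... dev valid only while RTNL is held ...
 */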

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        rtnl_lock();
        dev = __dev_getfirstbyhwtype(net, type);
        if (dev)
                dev_hold(dev);
        rtnl_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *      dev_get_by_flags - find any device with given flags
 *      @net: the applicable net namespace
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns NULL if a device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
                                    unsigned short mask)
{
        struct net_device *dev, *ret;

        ret = NULL;
        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);

/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names
 *      to allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
int dev_valid_name(const char *name)
{
        if (*name == '\0')
                return 0;
        if (strlen(name) >= IFNAMSIZ)
                return 0;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return 0;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return 0;
                name++;
        }
        return 1;
}
EXPORT_SYMBOL(dev_valid_name);
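
/*
 * Editorial examples (not part of the original file), following directly
 * from the checks above:
 *
 *      dev_valid_name("eth0")   -> 1
 *      dev_valid_name("eth%d")  -> 1  ('%' is neither '/' nor whitespace)
 *      dev_valid_name("")       -> 0  (empty name)
 *      dev_valid_name(".")      -> 0  (would clash in sysfs)
 *      dev_valid_name("a/b")    -> 0  (contains '/')
 *      dev_valid_name("a b")    -> 0  (contains whitespace)
 */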

/**
 *      __dev_alloc_name - allocate a name for a device
 *      @net: network namespace to allocate the device name in
 *      @name: name format string
 *      @buf:  scratch buffer and result name string
 *
 *      Passed a format string - eg "lt%d" it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        if (buf != name)
                snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string - eg "lt%d" it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
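
/*
 * Editorial usage sketch (not part of the original file): typical driver
 * use before register_netdevice(); the "dummy%d" pattern is an example.
 *
 *      err = dev_alloc_name(dev, "dummy%d");   -- picks dummy0, dummy1, ...
 *      if (err < 0)
 *              goto fail;
 *      -- on success err is the unit number and dev->name is filled in
 */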

static int dev_get_valid_name(struct net *net, const char *name, char *buf,
                              bool fmt)
{
        if (!dev_valid_name(name))
                return -EINVAL;

        if (fmt && strchr(name, '%'))
                return __dev_alloc_name(net, name, buf);
        else if (__dev_get_by_name(net, name))
                return -EEXIST;
        else if (buf != name)
                strlcpy(buf, name, IFNAMSIZ);

        return 0;
}

/**
 *      dev_change_name - change name of a device
 *      @dev: device
 *      @newname: name (or format string) must be at least IFNAMSIZ
 *
 *      Change the name of a device; a format string such as "eth%d"
 *      can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
                return 0;

        memcpy(oldname, dev->name, IFNAMSIZ);

        err = dev_get_valid_name(net, newname, dev->name, 1);
        if (err < 0)
                return err;

rollback:
        /* For now only devices in the initial network namespace
         * are in sysfs.
         */
        if (net_eq(net, &init_net)) {
                ret = device_rename(&dev->dev, dev->name);
                if (ret) {
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        return ret;
                }
        }

        write_lock_bh(&dev_base_lock);
        hlist_del(&dev->name_hlist);
        write_unlock_bh(&dev_base_lock);

        synchronize_rcu();

        write_lock_bh(&dev_base_lock);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                } else {
                        printk(KERN_ERR
                               "%s: name change rollback failed: %d.\n",
                               dev->name, ret);
                }
        }

        return err;
}

/**
 *      dev_set_alias - change ifalias of a device
 *      @dev: device
 *      @alias: name up to IFALIASZ
 *      @len: limit of bytes to copy from info
 *
 *      Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        ASSERT_RTNL();

        if (len >= IFALIASZ)
                return -EINVAL;

        if (!len) {
                if (dev->ifalias) {
                        kfree(dev->ifalias);
                        dev->ifalias = NULL;
                }
                return 0;
        }

        dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
        if (!dev->ifalias)
                return -ENOMEM;

        strlcpy(dev->ifalias, alias, len+1);
        return len;
}


/**
 *      netdev_features_change - device changes features
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *      netdev_state_change - device changes state
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed state. This function calls
 *      the notifier chains for netdev_chain and sends a NEWLINK message
 *      to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                call_netdevice_notifiers(NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
        }
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
        call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *      dev_load - load a network module
 *      @net: the applicable net namespace
 *      @name: name of interface
 *
 *      If a network interface is not present and the process has suitable
 *      privileges this function loads the module. If module loading is not
 *      available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        rcu_read_unlock();

        if (!dev && capable(CAP_NET_ADMIN))
                request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);
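
/*
 * Editorial usage sketch (not part of the original file): ioctl-style
 * callers probe for a possibly module-provided interface before looking
 * it up; the interface name "tunl0" is hypothetical and relies on a
 * matching module alias.
 *
 *      dev_load(net, "tunl0");              -- may request_module()
 *      dev = dev_get_by_name(net, "tunl0"); -- then retry the lookup
 */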

static int __dev_open(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int ret;

        ASSERT_RTNL();

        /*
         *      Is it even present?
         */
        if (!netif_device_present(dev))
                return -ENODEV;

        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
        ret = notifier_to_errno(ret);
        if (ret)
                return ret;

        /*
         *      Call device private open method
         */
        set_bit(__LINK_STATE_START, &dev->state);

        if (ops->ndo_validate_addr)
                ret = ops->ndo_validate_addr(dev);

        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);

        /*
         *      If it went open OK then:
         */

        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
                /*
                 *      Set the flags.
                 */
                dev->flags |= IFF_UP;

                /*
                 *      Enable NET_DMA
                 */
                net_dmaengine_get();

                /*
                 *      Initialize multicasting status
                 */
                dev_set_rx_mode(dev);

                /*
                 *      Wakeup transmit queue engine
                 */
                dev_activate(dev);
        }

        return ret;
}

/**
 *      dev_open - prepare an interface for use.
 *      @dev: device to open
 *
 *      Takes a device from down to up state. The device's private open
 *      function is invoked and then the multicast lists are loaded. Finally
 *      the device is moved into the up state and a %NETDEV_UP message is
 *      sent to the netdev notifier chain.
 *
 *      Calling this function on an active interface is a nop. On a failure
 *      a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
        int ret;

        /*
         *      Is it already up?
         */
        if (dev->flags & IFF_UP)
                return 0;

        /*
         *      Open device
         */
        ret = __dev_open(dev);
        if (ret < 0)
                return ret;

        /*
         *      ... and announce new interface.
         */
        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
        call_netdevice_notifiers(NETDEV_UP, dev);

        return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;

        ASSERT_RTNL();
        might_sleep();

        /*
         *      Tell people we are going down, so that they can
         *      prepare for death while the device is still operating.
         */
        call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

        clear_bit(__LINK_STATE_START, &dev->state);

        /* Synchronize to scheduled poll. We cannot touch poll list,
         * it can be even on different cpu. So just clear netif_running().
         *
         * dev->stop() will invoke napi_disable() on all of its
         * napi_struct instances on this device.
         */
        smp_mb__after_clear_bit(); /* Commit netif_running(). */

        dev_deactivate(dev);

        /*
         *      Call the device specific close. This cannot fail.
         *      Only if device is UP
         *
         *      We allow it to be called even after a DETACH hot-plug
         *      event.
         */
        if (ops->ndo_stop)
                ops->ndo_stop(dev);

        /*
         *      Device is now down.
         */

        dev->flags &= ~IFF_UP;

        /*
         *      Shutdown NET_DMA
         */
        net_dmaengine_put();

        return 0;
}

/**
 *      dev_close - shutdown an interface.
 *      @dev: device to shutdown
 *
 *      This function moves an active device into down state. A
 *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *      chain.
 */
int dev_close(struct net_device *dev)
{
        if (!(dev->flags & IFF_UP))
                return 0;

        __dev_close(dev);

        /*
         * Tell people we are down
         */
        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
        call_netdevice_notifiers(NETDEV_DOWN, dev);

        return 0;
}
EXPORT_SYMBOL(dev_close);
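
/*
 * Editorial usage sketch (not part of the original file): both calls
 * require RTNL, mirroring what "ip link set DEV up/down" triggers.
 *
 *      rtnl_lock();
 *      err = dev_open(dev);    -- nop if the device is already IFF_UP
 *      ...
 *      dev_close(dev);         -- always returns 0
 *      rtnl_unlock();
 */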
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290
1291
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001292/**
1293 * dev_disable_lro - disable Large Receive Offload on a device
1294 * @dev: device
1295 *
1296 * Disable Large Receive Offload (LRO) on a net device. Must be
1297 * called under RTNL. This is needed if received packets may be
1298 * forwarded to another interface.
1299 */
1300void dev_disable_lro(struct net_device *dev)
1301{
1302 if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
1303 dev->ethtool_ops->set_flags) {
1304 u32 flags = dev->ethtool_ops->get_flags(dev);
1305 if (flags & ETH_FLAG_LRO) {
1306 flags &= ~ETH_FLAG_LRO;
1307 dev->ethtool_ops->set_flags(dev, flags);
1308 }
1309 }
1310 WARN_ON(dev->features & NETIF_F_LRO);
1311}
1312EXPORT_SYMBOL(dev_disable_lro);

static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to give it a race-free view of the network
 *	device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
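
/*
 * Illustrative sketch, not part of the original file: a typical user
 * registers a notifier_block whose callback switches on the event type.
 * All example_* names below are hypothetical.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *			break;
 *		case NETDEV_UNREGISTER:
 *			example_forget_device(dev);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 */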

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);

	if (!(dev->flags & IFF_UP))
		return NET_RX_DROP;

	if (skb->len > (dev->mtu + dev->hard_header_len))
		return NET_RX_DROP;

	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
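
/*
 * Illustrative sketch, not part of the original file: a paired device in
 * the style of veth can hand frames to its peer from start_xmit.  Note
 * that the early NET_RX_DROP returns above do not free the skb, so error
 * handling is deliberately elided here.  example_get_peer() is a
 * hypothetical helper.
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = example_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
 *			dev->stats.tx_packets++;
 *		return NETDEV_TX_OK;
 *	}
 */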

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp(skb);
#else
	net_timestamp(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
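
/*
 * Illustrative sketch, not part of the original file: a TX-completion
 * handler that may run in hard-IRQ context frees transmitted buffers
 * with the _irq/_any variants rather than plain dev_kfree_skb(), e.g.
 * (example_reap_completed() is a hypothetical helper):
 *
 *	static irqreturn_t example_tx_irq(int irq, void *data)
 *	{
 *		struct net_device *dev = data;
 *		struct sk_buff *skb;
 *
 *		while ((skb = example_reap_completed(dev)) != NULL)
 *			dev_kfree_skb_irq(skb);
 *		return IRQ_HANDLED;
 *	}
 */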


/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached to the system and restart the queues if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
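
/*
 * Illustrative sketch, not part of the original file: a PCI driver's
 * suspend/resume path typically brackets hardware power transitions with
 * these helpers.  The example_* function names are hypothetical.
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		example_hw_power_down(dev);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		example_hw_power_up(dev);
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */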

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

/**
 * skb_set_dev - assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
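
/*
 * Illustrative sketch, not part of the original file: a caller emulating
 * GSO in software (as dev_gso_segment() below does) walks the returned
 * singly linked list; a NULL return means the original skb can be sent
 * as-is.  example_xmit_one() is a hypothetical transmit helper.
 *
 *	struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *	if (IS_ERR(segs))
 *		goto drop;
 *	while (segs) {
 *		struct sk_buff *nskb = segs;
 *
 *		segs = segs->next;
 *		nskb->next = NULL;
 *		example_xmit_one(nskb);
 *	}
 */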

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and can map all of the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;

	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		/*
		 * If the device doesn't need skb->dst, release it right now
		 * while it's hot in this cpu cache.
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		rc = ops->ndo_start_xmit(skb, dev);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		/*
		 * TODO: if skb_orphan() was called by
		 * dev->hard_start_xmit() (for example, the unmodified
		 * igb driver does that; bnx2 doesn't), then
		 * skb_tx_software_timestamp() will be unable to send
		 * back the time stamp.
		 *
		 * How can this be prevented? Always create another
		 * reference to the socket before calling
		 * dev->hard_start_xmit()? Prevent that skb_orphan()
		 * does anything in dev->hard_start_xmit() by clearing
		 * the skb destructor before the call and restoring it
		 * afterwards, then doing the skb_orphan() ourselves?
		 */
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If the device doesn't need nskb->dst, release it right now
		 * while it's hot in this cpu cache.
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
	return rc;
}

static u32 skb_tx_hashrnd;

u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= dev->real_num_tx_queues))
			hash -= dev->real_num_tx_queues;
		return hash;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = skb->protocol;

	hash = jhash_1word(hash, skb_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
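
/*
 * Illustrative sketch, not part of the original file: a multiqueue driver
 * with no special queue-mapping policy can build its ndo_select_queue()
 * directly on this helper (the wrapper is hypothetical; drivers without
 * an ndo_select_queue get equivalent behaviour from dev_pick_tx() below):
 *
 *	static u16 example_select_queue(struct net_device *dev,
 *					struct sk_buff *skb)
 *	{
 *		return skb_tx_hash(dev, skb);
 *	}
 */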

static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		if (net_ratelimit()) {
			WARN(1, "%s selects TX queue %d, but "
			     "real number of TX queues is %d\n",
			     dev->name, queue_index,
			     dev->real_num_tx_queues);
		}
		return 0;
	}
	return queue_index;
}

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	u16 queue_index;
	struct sock *sk = skb->sk;

	if (sk_tx_queue_recorded(sk)) {
		queue_index = sk_tx_queue_get(sk);
	} else {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue) {
			queue_index = ops->ndo_select_queue(dev, skb);
			queue_index = dev_cap_txqueue(dev, queue_index);
		} else {
			queue_index = 0;
			if (dev->real_num_tx_queues > 1)
				queue_index = skb_tx_hash(dev, skb);

			if (sk && sk->sk_dst_cache)
				sk_tx_queue_set(sk, queue_index);
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	int rc;

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		__qdisc_update_bstats(q, skb->len);
		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
			__qdisc_run(q);
		else
			clear_bit(__QDISC_STATE_RUNNING, &q->state);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = qdisc_enqueue_root(skb, q);
		qdisc_run(q);
	}
	spin_unlock(root_lock);

	return rc;
}

/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG, or if
 *	   at least one of fragments is in highmem and device does not
 *	   support DMA from it.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      struct net_device *dev)
{
	return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
	       (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
					      illegal_highdma(dev, skb)));
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	/* Convert a paged skb to linear, if required */
	if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. This is the common case for software
	   devices: loopback, all sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible that they rely on the protection
	   we provide here.

	   Check this and take the lock; it is not prone to deadlocks.
	   Either way, a noqueue qdisc is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = dev_hard_start_xmit(skb, dev, txq);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
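
/*
 * Illustrative sketch, not part of the original file: a caller that has
 * built a frame hands it off roughly as follows.  The skb is consumed
 * whatever the return value, so only accounting is possible afterwards;
 * example_stats is a hypothetical counter and TC_PRIO_CONTROL just one
 * possible priority.
 *
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS)
 *		example_stats.tx_failed++;
 */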


/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };


/**
 *	netif_rx - post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is shortest when
	 * the CPU is congested but still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
EXPORT_SYMBOL(netif_rx);
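
/*
 * Illustrative sketch, not part of the original file: a legacy (non-NAPI)
 * driver posts each received frame from its interrupt handler like this;
 * the buffer-fill details and pkt_len are elided assumptions.
 *
 *	skb_put(skb, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *	dev->stats.rx_packets++;
 */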

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)

#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

/*
 * If bridge module is loaded call bridging hook.
 * returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(br_handle_frame_hook);

static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * whenever CONFIG_NET_CLS_ACT is?  Otherwise we pay for some useless
 * instructions (a compare and two extra stores) when it is off but
 * CONFIG_NET_CLS_ACT is on.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/*
 * netif_nit_deliver - deliver received packets to network taps
 * @skb: buffer
 *
 * This function is used to deliver incoming packets to network
 * taps. It should be used when the normal netif_receive_skb path
 * is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	struct net_device *master;
	struct net_device *null_or_orig;
	struct net_device *null_or_bond;
	int ret = NET_RX_DROP;
	__be16 type;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
		return NET_RX_SUCCESS;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->skb_iif)
		skb->skb_iif = skb->dev->ifindex;

	null_or_orig = NULL;
	orig_dev = skb->dev;
	master = ACCESS_ONCE(orig_dev->master);
	if (master) {
		if (skb_bond_should_drop(skb, master))
			null_or_orig = orig_dev; /* deliver only exact match */
		else
			skb->dev = master;
	}

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	/*
	 * Make sure frames received on VLAN interfaces stacked on
	 * bonding interfaces still make their way to any base bonding
	 * device that may have registered for a specific ptype.  The
	 * handler may have to adjust skb->dev and orig_dev.
	 */
	null_or_bond = NULL;
	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
		null_or_bond = vlan_dev_real_dev(skb->dev);
	}

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && (ptype->dev == null_or_orig ||
		     ptype->dev == skb->dev || ptype->dev == orig_dev ||
		     ptype->dev == null_or_bond)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(netif_receive_skb);
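
/*
 * Illustrative sketch, not part of the original file: a NAPI driver's
 * poll routine feeds frames to netif_receive_skb() from softirq context.
 * example_rx_ready() and example_rx_pop() are hypothetical helpers.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *
 *		while (work < budget && example_rx_ready(napi)) {
 *			struct sk_buff *skb = example_rx_pop(napi);
 *
 *			skb->protocol = eth_type_trans(skb, skb->dev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */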

/* Network device is going away, flush any packets still pending */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
		if (skb->dev == dev) {
			__skb_unlink(skb, &queue->input_pkt_queue);
			kfree_skb(skb);
		}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}
2641
David S. Miller11380a42010-01-19 13:46:10 -08002642static void napi_gro_flush(struct napi_struct *napi)
Herbert Xud565b0a2008-12-15 23:38:52 -08002643{
2644 struct sk_buff *skb, *next;
2645
2646 for (skb = napi->gro_list; skb; skb = next) {
2647 next = skb->next;
2648 skb->next = NULL;
2649 napi_gro_complete(skb);
2650 }
2651
Herbert Xu4ae55442009-02-08 18:00:36 +00002652 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002653 napi->gro_list = NULL;
2654}
Herbert Xud565b0a2008-12-15 23:38:52 -08002655
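/*
 * Core GRO engine.  Each matching protocol handler's ->gro_receive()
 * either merges the new skb into a flow already held on napi->gro_list
 * (reporting this through NAPI_GRO_CB(skb)->same_flow) or requests a
 * flush.  A non-NULL return value names a held skb whose aggregation
 * is finished and is handed to napi_gro_complete().  At most
 * MAX_GRO_SKBS flows are kept per NAPI instance; anything beyond that,
 * or anything a handler wants flushed, goes up the normal path.
 */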
Ben Hutchings5b252f02009-10-29 07:17:09 +00002656enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002657{
2658 struct sk_buff **pp = NULL;
2659 struct packet_type *ptype;
2660 __be16 type = skb->protocol;
2661 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002662 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002663 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002664 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002665
2666 if (!(skb->dev->features & NETIF_F_GRO))
2667 goto normal;
2668
David S. Miller4cf704f2009-06-09 00:18:51 -07002669 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002670 goto normal;
2671
Herbert Xud565b0a2008-12-15 23:38:52 -08002672 rcu_read_lock();
2673 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002674 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2675 continue;
2676
Herbert Xu86911732009-01-29 14:19:50 +00002677 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002678 mac_len = skb->network_header - skb->mac_header;
2679 skb->mac_len = mac_len;
2680 NAPI_GRO_CB(skb)->same_flow = 0;
2681 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002682 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002683
Herbert Xud565b0a2008-12-15 23:38:52 -08002684 pp = ptype->gro_receive(&napi->gro_list, skb);
2685 break;
2686 }
2687 rcu_read_unlock();
2688
2689 if (&ptype->list == head)
2690 goto normal;
2691
Herbert Xu0da2afd52008-12-26 14:57:42 -08002692 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002693 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002694
Herbert Xud565b0a2008-12-15 23:38:52 -08002695 if (pp) {
2696 struct sk_buff *nskb = *pp;
2697
2698 *pp = nskb->next;
2699 nskb->next = NULL;
2700 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002701 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002702 }
2703
Herbert Xu0da2afd52008-12-26 14:57:42 -08002704 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002705 goto ok;
2706
Herbert Xu4ae55442009-02-08 18:00:36 +00002707 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002708 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002709
Herbert Xu4ae55442009-02-08 18:00:36 +00002710 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002711 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002712 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002713 skb->next = napi->gro_list;
2714 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002715 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002716
Herbert Xuad0f9902009-02-01 01:24:55 -08002717pull:
Herbert Xucb189782009-05-26 18:50:31 +00002718 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2719 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2720
2721 BUG_ON(skb->end - skb->tail < grow);
2722
2723 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2724
2725 skb->tail += grow;
2726 skb->data_len -= grow;
2727
2728 skb_shinfo(skb)->frags[0].page_offset += grow;
2729 skb_shinfo(skb)->frags[0].size -= grow;
2730
2731 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2732 put_page(skb_shinfo(skb)->frags[0].page);
2733 memmove(skb_shinfo(skb)->frags,
2734 skb_shinfo(skb)->frags + 1,
2735 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2736 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002737 }
2738
Herbert Xud565b0a2008-12-15 23:38:52 -08002739ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002740 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002741
2742normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002743 ret = GRO_NORMAL;
2744 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002745}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002746EXPORT_SYMBOL(dev_gro_receive);
2747
Ben Hutchings5b252f02009-10-29 07:17:09 +00002748static gro_result_t
2749__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002750{
2751 struct sk_buff *p;
2752
Herbert Xud1c76af2009-03-16 10:50:02 -07002753 if (netpoll_rx_on(skb))
2754 return GRO_NORMAL;
2755
Herbert Xu96e93ea2009-01-06 10:49:34 -08002756 for (p = napi->gro_list; p; p = p->next) {
Joe Perchesf64f9e72009-11-29 16:55:45 -08002757 NAPI_GRO_CB(p)->same_flow =
2758 (p->dev == skb->dev) &&
2759 !compare_ether_header(skb_mac_header(p),
2760 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002761 NAPI_GRO_CB(p)->flush = 0;
2762 }
2763
2764 return dev_gro_receive(napi, skb);
2765}
Herbert Xu5d38a072009-01-04 16:13:40 -08002766
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002767gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002768{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002769 switch (ret) {
2770 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002771 if (netif_receive_skb(skb))
2772 ret = GRO_DROP;
2773 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002774
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002775 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002776 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002777 kfree_skb(skb);
2778 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002779
2780 case GRO_HELD:
2781 case GRO_MERGED:
2782 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002783 }
2784
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002785 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002786}
2787EXPORT_SYMBOL(napi_skb_finish);
2788
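/*
 * Reset the GRO header-access state for a new skb.  When the linear
 * area is empty and the first fragment sits in lowmem, frag0 is
 * pointed straight at that fragment so the GRO header helpers can read
 * headers without pulling them into the skb head first.
 */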
Herbert Xu78a478d2009-05-26 18:50:21 +00002789void skb_gro_reset_offset(struct sk_buff *skb)
2790{
2791 NAPI_GRO_CB(skb)->data_offset = 0;
2792 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002793 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002794
Herbert Xu78d3fd02009-05-26 18:50:23 +00002795 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002796 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002797 NAPI_GRO_CB(skb)->frag0 =
2798 page_address(skb_shinfo(skb)->frags[0].page) +
2799 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002800 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2801 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002802}
2803EXPORT_SYMBOL(skb_gro_reset_offset);
2804
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002805gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002806{
Herbert Xu86911732009-01-29 14:19:50 +00002807 skb_gro_reset_offset(skb);
2808
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002809 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002810}
2811EXPORT_SYMBOL(napi_gro_receive);
2812
Herbert Xu96e93ea2009-01-06 10:49:34 -08002813void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2814{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002815 __skb_pull(skb, skb_headlen(skb));
2816 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2817
2818 napi->skb = skb;
2819}
2820EXPORT_SYMBOL(napi_reuse_skb);
2821
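/*
 * napi_get_frags()/napi_gro_frags() serve drivers that receive
 * directly into page fragments: the driver attaches its pages to the
 * skb obtained here and hands it back through napi_gro_frags(), which
 * recovers the Ethernet header from the fragment data itself.
 */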
Herbert Xu76620aa2009-04-16 02:02:07 -07002822struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002823{
Herbert Xu5d38a072009-01-04 16:13:40 -08002824 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002825
2826 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00002827 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2828 if (skb)
2829 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002830 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08002831 return skb;
2832}
Herbert Xu76620aa2009-04-16 02:02:07 -07002833EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002834
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002835gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2836 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002837{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002838 switch (ret) {
2839 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002840 case GRO_HELD:
Ajit Khapardee76b69c2010-02-16 20:25:43 +00002841 skb->protocol = eth_type_trans(skb, skb->dev);
Herbert Xu86911732009-01-29 14:19:50 +00002842
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002843 if (ret == GRO_HELD)
2844 skb_gro_pull(skb, -ETH_HLEN);
2845 else if (netif_receive_skb(skb))
2846 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00002847 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002848
2849 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002850 case GRO_MERGED_FREE:
2851 napi_reuse_skb(napi, skb);
2852 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002853
2854 case GRO_MERGED:
2855 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002856 }
2857
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002858 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002859}
2860EXPORT_SYMBOL(napi_frags_finish);
2861
Herbert Xu76620aa2009-04-16 02:02:07 -07002862struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002863{
Herbert Xu76620aa2009-04-16 02:02:07 -07002864 struct sk_buff *skb = napi->skb;
2865 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002866 unsigned int hlen;
2867 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002868
2869 napi->skb = NULL;
2870
2871 skb_reset_mac_header(skb);
2872 skb_gro_reset_offset(skb);
2873
Herbert Xua5b1cf22009-05-26 18:50:28 +00002874 off = skb_gro_offset(skb);
2875 hlen = off + sizeof(*eth);
2876 eth = skb_gro_header_fast(skb, off);
2877 if (skb_gro_header_hard(skb, hlen)) {
2878 eth = skb_gro_header_slow(skb, hlen, off);
2879 if (unlikely(!eth)) {
2880 napi_reuse_skb(napi, skb);
2881 skb = NULL;
2882 goto out;
2883 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002884 }
2885
2886 skb_gro_pull(skb, sizeof(*eth));
2887
2888 /*
2889 * This works because the only protocols we care about don't require
2890 * special handling. We'll fix it up properly at the end.
2891 */
2892 skb->protocol = eth->h_proto;
2893
2894out:
2895 return skb;
2896}
2897EXPORT_SYMBOL(napi_frags_skb);
2898
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002899gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07002900{
2901 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002902
2903 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002904 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002905
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002906 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002907}
2908EXPORT_SYMBOL(napi_gro_frags);
2909
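/*
 * NAPI poll callback backing the legacy netif_rx() path: drain the
 * per-CPU input_pkt_queue, delivering at most @quota packets and
 * bailing out once the jiffy counter moves on.
 */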
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002910static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911{
2912 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2914 unsigned long start_time = jiffies;
2915
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002916 napi->weight = weight_p;
2917 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919
2920 local_irq_disable();
2921 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002922 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002923 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002924 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002925 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002926 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927 local_irq_enable();
2928
Herbert Xu8f1ead22009-03-26 00:59:10 -07002929 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002930 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002932 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933}
2934
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002935/**
2936 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002937 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002938 *
2939 * The entry's receive function will be scheduled to run
2940 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002941void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002942{
2943 unsigned long flags;
2944
2945 local_irq_save(flags);
2946 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2947 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2948 local_irq_restore(flags);
2949}
2950EXPORT_SYMBOL(__napi_schedule);
2951
Herbert Xud565b0a2008-12-15 23:38:52 -08002952void __napi_complete(struct napi_struct *n)
2953{
2954 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2955 BUG_ON(n->gro_list);
2956
2957 list_del(&n->poll_list);
2958 smp_mb__before_clear_bit();
2959 clear_bit(NAPI_STATE_SCHED, &n->state);
2960}
2961EXPORT_SYMBOL(__napi_complete);
2962
2963void napi_complete(struct napi_struct *n)
2964{
2965 unsigned long flags;
2966
2967 /*
2968 * don't let napi dequeue from the cpu poll list
2969 * just in case it's running on a different cpu
2970 */
2971 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2972 return;
2973
2974 napi_gro_flush(n);
2975 local_irq_save(flags);
2976 __napi_complete(n);
2977 local_irq_restore(flags);
2978}
2979EXPORT_SYMBOL(napi_complete);
2980
2981void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2982 int (*poll)(struct napi_struct *, int), int weight)
2983{
2984 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002985 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002986 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002987 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002988 napi->poll = poll;
2989 napi->weight = weight;
2990 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002991 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002992#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002993 spin_lock_init(&napi->poll_lock);
2994 napi->poll_owner = -1;
2995#endif
2996 set_bit(NAPI_STATE_SCHED, &napi->state);
2997}
2998EXPORT_SYMBOL(netif_napi_add);
2999
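/*
 * Illustrative driver-side sketch of the NAPI API above; it is not
 * part of this file, and my_adapter, my_rx_one() and my_irq_enable()
 * are hypothetical.  Consume up to @budget packets; once the ring is
 * drained, complete NAPI and re-arm the device interrupt:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_adapter *ap =
 *			container_of(napi, struct my_adapter, napi);
 *		int work = 0;
 *
 *		while (work < budget && my_rx_one(ap))
 *			work++;
 *		if (work < budget) {
 *			napi_complete(napi);
 *			my_irq_enable(ap);
 *		}
 *		return work;
 *	}
 *
 * The driver registers this with netif_napi_add(dev, &ap->napi,
 * my_poll, 64); its IRQ handler masks the device interrupt and calls
 * napi_schedule(&ap->napi).
 */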
3000void netif_napi_del(struct napi_struct *napi)
3001{
3002 struct sk_buff *skb, *next;
3003
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08003004 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07003005 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08003006
3007 for (skb = napi->gro_list; skb; skb = next) {
3008 next = skb->next;
3009 skb->next = NULL;
3010 kfree_skb(skb);
3011 }
3012
3013 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00003014 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08003015}
3016EXPORT_SYMBOL(netif_napi_del);
3017
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003018
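/*
 * NET_RX_SOFTIRQ handler: round-robin over this CPU's poll list,
 * letting each NAPI instance process up to its ->weight of packets,
 * bounded overall by netdev_budget and a two-jiffy time limit.
 */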
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019static void net_rx_action(struct softirq_action *h)
3020{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003021 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08003022 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07003023 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07003024 void *have;
3025
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 local_irq_disable();
3027
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003028 while (!list_empty(list)) {
3029 struct napi_struct *n;
3030 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003031
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003032 /* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08003033 * Allow this to run for 2 jiffies, which gives
3034 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003035 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08003036 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 goto softnet_break;
3038
3039 local_irq_enable();
3040
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003041 /* Even though interrupts have been re-enabled, this
3042 * access is safe because interrupts can only add new
3043 * entries to the tail of this list, and only ->poll()
3044 * calls can remove this head entry from the list.
3045 */
stephen hemmingere5e26d72010-02-24 14:01:38 +00003046 n = list_first_entry(list, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003048 have = netpoll_poll_lock(n);
3049
3050 weight = n->weight;
3051
David S. Miller0a7606c2007-10-29 21:28:47 -07003052 /* This NAPI_STATE_SCHED test is for avoiding a race
3053 * with netpoll's poll_napi(). Only the entity which
3054 * obtains the lock and sees NAPI_STATE_SCHED set will
3055 * actually make the ->poll() call. Therefore we avoid
3056 * accidentally calling ->poll() when NAPI is not scheduled.
3057 */
3058 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00003059 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07003060 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00003061 trace_napi_poll(n);
3062 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003063
3064 WARN_ON_ONCE(work > weight);
3065
3066 budget -= work;
3067
3068 local_irq_disable();
3069
3070 /* Drivers must not modify the NAPI state if they
3071 * consume the entire weight. In such cases this code
3072 * still "owns" the NAPI instance and therefore can
3073 * move the instance around on the list at-will.
3074 */
David S. Millerfed17f32008-01-07 21:00:40 -08003075 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07003076 if (unlikely(napi_disable_pending(n))) {
3077 local_irq_enable();
3078 napi_complete(n);
3079 local_irq_disable();
3080 } else
David S. Millerfed17f32008-01-07 21:00:40 -08003081 list_move_tail(&n->poll_list, list);
3082 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003083
3084 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003085 }
3086out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07003087 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003088
Chris Leechdb217332006-06-17 21:24:58 -07003089#ifdef CONFIG_NET_DMA
3090 /*
3091 * There may not be any more sk_buffs coming right now, so push
3092 * any pending DMA copies to hardware
3093 */
Dan Williams2ba05622009-01-06 11:38:14 -07003094 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07003095#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003096
Linus Torvalds1da177e2005-04-16 15:20:36 -07003097 return;
3098
3099softnet_break:
3100 __get_cpu_var(netdev_rx_stat).time_squeeze++;
3101 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3102 goto out;
3103}
3104
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003105static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106
3107/**
3108 * register_gifconf - register a SIOCGIFCONF handler
3109 * @family: Address family
3110 * @gifconf: Function handler
3111 *
3112 * Register protocol dependent address dumping routines. The handler
3113 * that is passed must not be freed or reused until it has been replaced
3114 * by another handler.
3115 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003116int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117{
3118 if (family >= NPROTO)
3119 return -EINVAL;
3120 gifconf_list[family] = gifconf;
3121 return 0;
3122}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003123EXPORT_SYMBOL(register_gifconf);
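/*
 * For instance, IPv4 registers its handler roughly as
 * register_gifconf(PF_INET, inet_gifconf) from net/ipv4/devinet.c;
 * cited here only to illustrate the calling convention.
 */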
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124
3125
3126/*
3127 * Map an interface index to its name (SIOCGIFNAME)
3128 */
3129
3130/*
3131 * We need this ioctl for efficient implementation of the
3132 * if_indextoname() function required by the IPv6 API. Without
3133 * it, we would have to search all the interfaces to find a
3134 * match. --pb
3135 */
3136
Eric W. Biederman881d9662007-09-17 11:56:21 -07003137static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003138{
3139 struct net_device *dev;
3140 struct ifreq ifr;
3141
3142 /*
3143 * Fetch the caller's info block.
3144 */
3145
3146 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3147 return -EFAULT;
3148
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003149 rcu_read_lock();
3150 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003152 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153 return -ENODEV;
3154 }
3155
3156 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003157 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158
3159 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3160 return -EFAULT;
3161 return 0;
3162}
3163
3164/*
3165 * Perform a SIOCGIFCONF call. This structure will change
3166 * size eventually, and there is nothing I can do about it.
3167 * Thus we will need a 'compatibility mode'.
3168 */
3169
Eric W. Biederman881d9662007-09-17 11:56:21 -07003170static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003171{
3172 struct ifconf ifc;
3173 struct net_device *dev;
3174 char __user *pos;
3175 int len;
3176 int total;
3177 int i;
3178
3179 /*
3180 * Fetch the caller's info block.
3181 */
3182
3183 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3184 return -EFAULT;
3185
3186 pos = ifc.ifc_buf;
3187 len = ifc.ifc_len;
3188
3189 /*
3190 * Loop over the interfaces, and write an info block for each.
3191 */
3192
3193 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003194 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 for (i = 0; i < NPROTO; i++) {
3196 if (gifconf_list[i]) {
3197 int done;
3198 if (!pos)
3199 done = gifconf_list[i](dev, NULL, 0);
3200 else
3201 done = gifconf_list[i](dev, pos + total,
3202 len - total);
3203 if (done < 0)
3204 return -EFAULT;
3205 total += done;
3206 }
3207 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003208 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209
3210 /*
3211 * All done. Write the updated control block back to the caller.
3212 */
3213 ifc.ifc_len = total;
3214
3215 /*
3216 * Both BSD and Solaris return 0 here, so we do too.
3217 */
3218 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3219}
3220
3221#ifdef CONFIG_PROC_FS
3222/*
3223 * This is invoked by the /proc filesystem handler to display a device
3224 * in detail.
3225 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003227 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228{
Denis V. Luneve372c412007-11-19 22:31:54 -08003229 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003230 loff_t off;
3231 struct net_device *dev;
3232
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003233 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07003234 if (!*pos)
3235 return SEQ_START_TOKEN;
3236
3237 off = 1;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003238 for_each_netdev_rcu(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003239 if (off++ == *pos)
3240 return dev;
3241
3242 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243}
3244
3245void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3246{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003247 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3248 first_net_device(seq_file_net(seq)) :
3249 next_net_device((struct net_device *)v);
3250
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251 ++*pos;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003252 return rcu_dereference(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253}
3254
3255void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003256 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003258 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003259}
3260
3261static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3262{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003263 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264
Jesper Dangaard Brouer2d13baf2010-01-05 05:50:52 +00003265 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
Rusty Russell5a1b5892007-04-28 21:04:03 -07003266 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3267 dev->name, stats->rx_bytes, stats->rx_packets,
3268 stats->rx_errors,
3269 stats->rx_dropped + stats->rx_missed_errors,
3270 stats->rx_fifo_errors,
3271 stats->rx_length_errors + stats->rx_over_errors +
3272 stats->rx_crc_errors + stats->rx_frame_errors,
3273 stats->rx_compressed, stats->multicast,
3274 stats->tx_bytes, stats->tx_packets,
3275 stats->tx_errors, stats->tx_dropped,
3276 stats->tx_fifo_errors, stats->collisions,
3277 stats->tx_carrier_errors +
3278 stats->tx_aborted_errors +
3279 stats->tx_window_errors +
3280 stats->tx_heartbeat_errors,
3281 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282}
3283
3284/*
3285 * Called from the PROCfs module. This now uses the new arbitrary sized
3286 * /proc/net interface to create /proc/net/dev
3287 */
3288static int dev_seq_show(struct seq_file *seq, void *v)
3289{
3290 if (v == SEQ_START_TOKEN)
3291 seq_puts(seq, "Inter-| Receive "
3292 " | Transmit\n"
3293 " face |bytes packets errs drop fifo frame "
3294 "compressed multicast|bytes packets errs "
3295 "drop fifo colls carrier compressed\n");
3296 else
3297 dev_seq_printf_stats(seq, v);
3298 return 0;
3299}
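/*
 * An output line then looks roughly like (values illustrative only):
 *
 * eth0: 1152998 8776 0 0 0 0 0 0 437216 5482 0 0 0 0 0 0
 */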
3300
3301static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3302{
3303 struct netif_rx_stats *rc = NULL;
3304
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003305 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003306 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307 rc = &per_cpu(netdev_rx_stat, *pos);
3308 break;
3309 } else
3310 ++*pos;
3311 return rc;
3312}
3313
3314static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3315{
3316 return softnet_get_online(pos);
3317}
3318
3319static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3320{
3321 ++*pos;
3322 return softnet_get_online(pos);
3323}
3324
3325static void softnet_seq_stop(struct seq_file *seq, void *v)
3326{
3327}
3328
3329static int softnet_seq_show(struct seq_file *seq, void *v)
3330{
3331 struct netif_rx_stats *s = v;
3332
3333 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003334 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003335 0, 0, 0, 0, /* was fastroute */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003336 s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 return 0;
3338}
3339
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003340static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341 .start = dev_seq_start,
3342 .next = dev_seq_next,
3343 .stop = dev_seq_stop,
3344 .show = dev_seq_show,
3345};
3346
3347static int dev_seq_open(struct inode *inode, struct file *file)
3348{
Denis V. Luneve372c412007-11-19 22:31:54 -08003349 return seq_open_net(inode, file, &dev_seq_ops,
3350 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351}
3352
Arjan van de Ven9a321442007-02-12 00:55:35 -08003353static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354 .owner = THIS_MODULE,
3355 .open = dev_seq_open,
3356 .read = seq_read,
3357 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003358 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003359};
3360
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003361static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362 .start = softnet_seq_start,
3363 .next = softnet_seq_next,
3364 .stop = softnet_seq_stop,
3365 .show = softnet_seq_show,
3366};
3367
3368static int softnet_seq_open(struct inode *inode, struct file *file)
3369{
3370 return seq_open(file, &softnet_seq_ops);
3371}
3372
Arjan van de Ven9a321442007-02-12 00:55:35 -08003373static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 .owner = THIS_MODULE,
3375 .open = softnet_seq_open,
3376 .read = seq_read,
3377 .llseek = seq_lseek,
3378 .release = seq_release,
3379};
3380
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003381static void *ptype_get_idx(loff_t pos)
3382{
3383 struct packet_type *pt = NULL;
3384 loff_t i = 0;
3385 int t;
3386
3387 list_for_each_entry_rcu(pt, &ptype_all, list) {
3388 if (i == pos)
3389 return pt;
3390 ++i;
3391 }
3392
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003393 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003394 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3395 if (i == pos)
3396 return pt;
3397 ++i;
3398 }
3399 }
3400 return NULL;
3401}
3402
3403static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003404 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003405{
3406 rcu_read_lock();
3407 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3408}
3409
3410static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3411{
3412 struct packet_type *pt;
3413 struct list_head *nxt;
3414 int hash;
3415
3416 ++*pos;
3417 if (v == SEQ_START_TOKEN)
3418 return ptype_get_idx(0);
3419
3420 pt = v;
3421 nxt = pt->list.next;
3422 if (pt->type == htons(ETH_P_ALL)) {
3423 if (nxt != &ptype_all)
3424 goto found;
3425 hash = 0;
3426 nxt = ptype_base[0].next;
3427 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003428 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003429
3430 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003431 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003432 return NULL;
3433 nxt = ptype_base[hash].next;
3434 }
3435found:
3436 return list_entry(nxt, struct packet_type, list);
3437}
3438
3439static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003440 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003441{
3442 rcu_read_unlock();
3443}
3444
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003445static int ptype_seq_show(struct seq_file *seq, void *v)
3446{
3447 struct packet_type *pt = v;
3448
3449 if (v == SEQ_START_TOKEN)
3450 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003451 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003452 if (pt->type == htons(ETH_P_ALL))
3453 seq_puts(seq, "ALL ");
3454 else
3455 seq_printf(seq, "%04x", ntohs(pt->type));
3456
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003457 seq_printf(seq, " %-8s %pF\n",
3458 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003459 }
3460
3461 return 0;
3462}
3463
3464static const struct seq_operations ptype_seq_ops = {
3465 .start = ptype_seq_start,
3466 .next = ptype_seq_next,
3467 .stop = ptype_seq_stop,
3468 .show = ptype_seq_show,
3469};
3470
3471static int ptype_seq_open(struct inode *inode, struct file *file)
3472{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003473 return seq_open_net(inode, file, &ptype_seq_ops,
3474 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003475}
3476
3477static const struct file_operations ptype_seq_fops = {
3478 .owner = THIS_MODULE,
3479 .open = ptype_seq_open,
3480 .read = seq_read,
3481 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003482 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003483};
3484
3485
Pavel Emelyanov46650792007-10-08 20:38:39 -07003486static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487{
3488 int rc = -ENOMEM;
3489
Eric W. Biederman881d9662007-09-17 11:56:21 -07003490 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003492 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003494 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003495 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003496
Eric W. Biederman881d9662007-09-17 11:56:21 -07003497 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003498 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499 rc = 0;
3500out:
3501 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003502out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003503 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003505 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003507 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508 goto out;
3509}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003510
Pavel Emelyanov46650792007-10-08 20:38:39 -07003511static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003512{
3513 wext_proc_exit(net);
3514
3515 proc_net_remove(net, "ptype");
3516 proc_net_remove(net, "softnet_stat");
3517 proc_net_remove(net, "dev");
3518}
3519
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003520static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003521 .init = dev_proc_net_init,
3522 .exit = dev_proc_net_exit,
3523};
3524
3525static int __init dev_proc_init(void)
3526{
3527 return register_pernet_subsys(&dev_proc_ops);
3528}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529#else
3530#define dev_proc_init() 0
3531#endif /* CONFIG_PROC_FS */
3532
3533
3534/**
3535 * netdev_set_master - set up master/slave pair
3536 * @slave: slave device
3537 * @master: new master device
3538 *
3539 * Changes the master device of the slave. Pass %NULL to break the
3540 * bonding. The caller must hold the RTNL semaphore. On a failure
3541 * a negative errno code is returned. On success the reference counts
3542 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3543 * function returns zero.
3544 */
3545int netdev_set_master(struct net_device *slave, struct net_device *master)
3546{
3547 struct net_device *old = slave->master;
3548
3549 ASSERT_RTNL();
3550
3551 if (master) {
3552 if (old)
3553 return -EBUSY;
3554 dev_hold(master);
3555 }
3556
3557 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003558
Linus Torvalds1da177e2005-04-16 15:20:36 -07003559 synchronize_net();
3560
3561 if (old)
3562 dev_put(old);
3563
3564 if (master)
3565 slave->flags |= IFF_SLAVE;
3566 else
3567 slave->flags &= ~IFF_SLAVE;
3568
3569 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3570 return 0;
3571}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003572EXPORT_SYMBOL(netdev_set_master);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003573
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003574static void dev_change_rx_flags(struct net_device *dev, int flags)
3575{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003576 const struct net_device_ops *ops = dev->netdev_ops;
3577
3578 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3579 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003580}
3581
Wang Chendad9b332008-06-18 01:48:28 -07003582static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003583{
3584 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003585 uid_t uid;
3586 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003587
Patrick McHardy24023452007-07-14 18:51:31 -07003588 ASSERT_RTNL();
3589
Wang Chendad9b332008-06-18 01:48:28 -07003590 dev->flags |= IFF_PROMISC;
3591 dev->promiscuity += inc;
3592 if (dev->promiscuity == 0) {
3593 /*
3594 * Avoid overflow.
3595 * If inc causes overflow, leave promiscuity untouched and return an error.
3596 */
3597 if (inc < 0)
3598 dev->flags &= ~IFF_PROMISC;
3599 else {
3600 dev->promiscuity -= inc;
3601 printk(KERN_WARNING "%s: promiscuity touches roof, "
3602 "set promiscuity failed, promiscuity feature "
3603 "of device might be broken.\n", dev->name);
3604 return -EOVERFLOW;
3605 }
3606 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003607 if (dev->flags != old_flags) {
3608 printk(KERN_INFO "device %s %s promiscuous mode\n",
3609 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3610 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003611 if (audit_enabled) {
3612 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003613 audit_log(current->audit_context, GFP_ATOMIC,
3614 AUDIT_ANOM_PROMISCUOUS,
3615 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3616 dev->name, (dev->flags & IFF_PROMISC),
3617 (old_flags & IFF_PROMISC),
3618 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003619 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003620 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003621 }
Patrick McHardy24023452007-07-14 18:51:31 -07003622
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003623 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003624 }
Wang Chendad9b332008-06-18 01:48:28 -07003625 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003626}
3627
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628/**
3629 * dev_set_promiscuity - update promiscuity count on a device
3630 * @dev: device
3631 * @inc: modifier
3632 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003633 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634 * remains above zero the interface remains promiscuous. Once it hits zero
3635 * the device reverts back to normal filtering operation. A negative inc
3636 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003637 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003638 */
Wang Chendad9b332008-06-18 01:48:28 -07003639int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640{
3641 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003642 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643
Wang Chendad9b332008-06-18 01:48:28 -07003644 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003645 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003646 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003647 if (dev->flags != old_flags)
3648 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003649 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003651EXPORT_SYMBOL(dev_set_promiscuity);
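/*
 * Illustrative usage (not taken from a specific caller): a capture
 * style user takes one promiscuous reference for its session and
 * drops it afterwards, under RTNL as required by ASSERT_RTNL() above:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	...
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */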
Linus Torvalds1da177e2005-04-16 15:20:36 -07003652
3653/**
3654 * dev_set_allmulti - update allmulti count on a device
3655 * @dev: device
3656 * @inc: modifier
3657 *
3658 * Add or remove reception of all multicast frames to a device. While the
3659 * count in the device remains above zero the interface keeps listening
3660 * to all multicast frames. Once it hits zero the device reverts back to normal
3661 * filtering operation. A negative @inc value is used to drop the counter
3662 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003663 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 */
3665
Wang Chendad9b332008-06-18 01:48:28 -07003666int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667{
3668 unsigned short old_flags = dev->flags;
3669
Patrick McHardy24023452007-07-14 18:51:31 -07003670 ASSERT_RTNL();
3671
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003673 dev->allmulti += inc;
3674 if (dev->allmulti == 0) {
3675 /*
3676 * Avoid overflow.
3677 * If inc causes overflow, leave allmulti untouched and return an error.
3678 */
3679 if (inc < 0)
3680 dev->flags &= ~IFF_ALLMULTI;
3681 else {
3682 dev->allmulti -= inc;
3683 printk(KERN_WARNING "%s: allmulti touches roof, "
3684 "set allmulti failed, allmulti feature of "
3685 "device might be broken.\n", dev->name);
3686 return -EOVERFLOW;
3687 }
3688 }
Patrick McHardy24023452007-07-14 18:51:31 -07003689 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003690 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003691 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003692 }
Wang Chendad9b332008-06-18 01:48:28 -07003693 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003694}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003695EXPORT_SYMBOL(dev_set_allmulti);
Patrick McHardy4417da62007-06-27 01:28:10 -07003696
3697/*
3698 * Upload unicast and multicast address lists to device and
3699 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003700 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003701 * are present.
3702 */
3703void __dev_set_rx_mode(struct net_device *dev)
3704{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003705 const struct net_device_ops *ops = dev->netdev_ops;
3706
Patrick McHardy4417da62007-06-27 01:28:10 -07003707 /* dev_open will call this function so the list will stay sane. */
3708 if (!(dev->flags&IFF_UP))
3709 return;
3710
3711 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003712 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003713
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003714 if (ops->ndo_set_rx_mode)
3715 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003716 else {
3717 /* Unicast addresses changes may only happen under the rtnl,
3718 * therefore calling __dev_set_promiscuity here is safe.
3719 */
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003720 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003721 __dev_set_promiscuity(dev, 1);
3722 dev->uc_promisc = 1;
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08003723 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003724 __dev_set_promiscuity(dev, -1);
3725 dev->uc_promisc = 0;
3726 }
3727
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003728 if (ops->ndo_set_multicast_list)
3729 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003730 }
3731}
3732
3733void dev_set_rx_mode(struct net_device *dev)
3734{
David S. Millerb9e40852008-07-15 00:15:08 -07003735 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003736 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003737 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003738}
3739
Jiri Pirkof001fde2009-05-05 02:48:28 +00003740/* hw addresses list handling functions */
3741
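/*
 * Entries below are reference counted: re-adding an address with the
 * same type only bumps ->refcount, and deletion unlinks an entry only
 * once that count reaches zero.  Unlinking is RCU-deferred through
 * ha_rcu_free() so lock-free readers may keep traversing the list.
 */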
Jiri Pirko31278e72009-06-17 01:12:19 +00003742static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3743 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003744{
3745 struct netdev_hw_addr *ha;
3746 int alloc_size;
3747
3748 if (addr_len > MAX_ADDR_LEN)
3749 return -EINVAL;
3750
Jiri Pirko31278e72009-06-17 01:12:19 +00003751 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003752 if (!memcmp(ha->addr, addr, addr_len) &&
3753 ha->type == addr_type) {
3754 ha->refcount++;
3755 return 0;
3756 }
3757 }
3758
3759
Jiri Pirkof001fde2009-05-05 02:48:28 +00003760 alloc_size = sizeof(*ha);
3761 if (alloc_size < L1_CACHE_BYTES)
3762 alloc_size = L1_CACHE_BYTES;
3763 ha = kmalloc(alloc_size, GFP_ATOMIC);
3764 if (!ha)
3765 return -ENOMEM;
3766 memcpy(ha->addr, addr, addr_len);
3767 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003768 ha->refcount = 1;
3769 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003770 list_add_tail_rcu(&ha->list, &list->list);
3771 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003772 return 0;
3773}
3774
3775static void ha_rcu_free(struct rcu_head *head)
3776{
3777 struct netdev_hw_addr *ha;
3778
3779 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3780 kfree(ha);
3781}
3782
Jiri Pirko31278e72009-06-17 01:12:19 +00003783static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3784 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003785{
3786 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003787
Jiri Pirko31278e72009-06-17 01:12:19 +00003788 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003789 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003790 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003791 if (--ha->refcount)
3792 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003793 list_del_rcu(&ha->list);
3794 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003795 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003796 return 0;
3797 }
3798 }
3799 return -ENOENT;
3800}
3801
Jiri Pirko31278e72009-06-17 01:12:19 +00003802static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3803 struct netdev_hw_addr_list *from_list,
3804 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003805 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003806{
3807 int err;
3808 struct netdev_hw_addr *ha, *ha2;
3809 unsigned char type;
3810
Jiri Pirko31278e72009-06-17 01:12:19 +00003811 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003812 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003813 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003814 if (err)
3815 goto unroll;
3816 }
3817 return 0;
3818
3819unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00003820 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003821 if (ha2 == ha)
3822 break;
3823 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003824 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003825 }
3826 return err;
3827}
3828
Jiri Pirko31278e72009-06-17 01:12:19 +00003829static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3830 struct netdev_hw_addr_list *from_list,
3831 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003832 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003833{
3834 struct netdev_hw_addr *ha;
3835 unsigned char type;
3836
Jiri Pirko31278e72009-06-17 01:12:19 +00003837 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003838 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003839 __hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003840 }
3841}
3842
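/*
 * __hw_addr_sync()/__hw_addr_unsync() keep a device-owned list
 * (to_list) in step with a caller-owned one (from_list): entries not
 * yet synced are pushed over and marked, while an entry whose only
 * remaining user is the sync itself (refcount == 1) is removed from
 * both lists.
 */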
Jiri Pirko31278e72009-06-17 01:12:19 +00003843static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3844 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003845 int addr_len)
3846{
3847 int err = 0;
3848 struct netdev_hw_addr *ha, *tmp;
3849
Jiri Pirko31278e72009-06-17 01:12:19 +00003850 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003851 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003852 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003853 addr_len, ha->type);
3854 if (err)
3855 break;
3856 ha->synced = true;
3857 ha->refcount++;
3858 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003859 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3860 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003861 }
3862 }
3863 return err;
3864}
3865
Jiri Pirko31278e72009-06-17 01:12:19 +00003866static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3867 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003868 int addr_len)
3869{
3870 struct netdev_hw_addr *ha, *tmp;
3871
Jiri Pirko31278e72009-06-17 01:12:19 +00003872 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003873 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003874 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003875 addr_len, ha->type);
3876 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003877 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003878 addr_len, ha->type);
3879 }
3880 }
3881}
3882
Jiri Pirko31278e72009-06-17 01:12:19 +00003883static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003884{
3885 struct netdev_hw_addr *ha, *tmp;
3886
Jiri Pirko31278e72009-06-17 01:12:19 +00003887 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003888 list_del_rcu(&ha->list);
3889 call_rcu(&ha->rcu_head, ha_rcu_free);
3890 }
Jiri Pirko31278e72009-06-17 01:12:19 +00003891 list->count = 0;
3892}
3893
3894static void __hw_addr_init(struct netdev_hw_addr_list *list)
3895{
3896 INIT_LIST_HEAD(&list->list);
3897 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003898}
3899
3900/* Device addresses handling functions */
3901
3902static void dev_addr_flush(struct net_device *dev)
3903{
3904 /* rtnl_mutex must be held here */
3905
Jiri Pirko31278e72009-06-17 01:12:19 +00003906 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003907 dev->dev_addr = NULL;
3908}
3909
3910static int dev_addr_init(struct net_device *dev)
3911{
3912 unsigned char addr[MAX_ADDR_LEN];
3913 struct netdev_hw_addr *ha;
3914 int err;
3915
3916 /* rtnl_mutex must be held here */
3917
Jiri Pirko31278e72009-06-17 01:12:19 +00003918 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00003919 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00003920 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003921 NETDEV_HW_ADDR_T_LAN);
3922 if (!err) {
3923 /*
3924 * Get the first (previously created) address from the list
3925 * and set dev_addr pointer to this location.
3926 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003927 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003928 struct netdev_hw_addr, list);
3929 dev->dev_addr = ha->addr;
3930 }
3931 return err;
3932}
3933
3934/**
3935 * dev_addr_add - Add a device address
3936 * @dev: device
3937 * @addr: address to add
3938 * @addr_type: address type
3939 *
3940 * Add a device address to the device or increase the reference count if
3941 * it already exists.
3942 *
3943 * The caller must hold the rtnl_mutex.
3944 */
3945int dev_addr_add(struct net_device *dev, unsigned char *addr,
3946 unsigned char addr_type)
3947{
3948 int err;
3949
3950 ASSERT_RTNL();
3951
Jiri Pirko31278e72009-06-17 01:12:19 +00003952 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003953 if (!err)
3954 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3955 return err;
3956}
3957EXPORT_SYMBOL(dev_addr_add);
3958
3959/**
3960 * dev_addr_del - Release a device address.
3961 * @dev: device
3962 * @addr: address to delete
3963 * @addr_type: address type
3964 *
3965 * Release reference to a device address and remove it from the device
3966 * if the reference count drops to zero.
3967 *
3968 * The caller must hold the rtnl_mutex.
3969 */
3970int dev_addr_del(struct net_device *dev, unsigned char *addr,
3971 unsigned char addr_type)
3972{
3973 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003974 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003975
3976 ASSERT_RTNL();
3977
Jiri Pirkoccffad252009-05-22 23:22:17 +00003978 /*
3979 * We cannot remove the first address from the list because
3980 * dev->dev_addr points to that.
3981 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003982 ha = list_first_entry(&dev->dev_addrs.list,
3983 struct netdev_hw_addr, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003984 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3985 return -ENOENT;
3986
Jiri Pirko31278e72009-06-17 01:12:19 +00003987 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003988 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003989 if (!err)
3990 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3991 return err;
3992}
3993EXPORT_SYMBOL(dev_addr_del);
3994
3995/**
3996 * dev_addr_add_multiple - Add device addresses from another device
3997 * @to_dev: device to which addresses will be added
3998 * @from_dev: device from which addresses will be added
3999 * @addr_type: address type - 0 means type will be used from from_dev
4000 *
4001 * Add the device addresses of one device to another.
4002 *
4003 * The caller must hold the rtnl_mutex.
4004 */
4005int dev_addr_add_multiple(struct net_device *to_dev,
4006 struct net_device *from_dev,
4007 unsigned char addr_type)
4008{
4009 int err;
4010
4011 ASSERT_RTNL();
4012
4013 if (from_dev->addr_len != to_dev->addr_len)
4014 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00004015 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004016 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004017 if (!err)
4018 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4019 return err;
4020}
4021EXPORT_SYMBOL(dev_addr_add_multiple);
4022
4023/**
4024 * dev_addr_del_multiple - Delete device addresses by another device
4025 * @to_dev: device where the addresses will be deleted
4026 * @from_dev: device whose addresses will be deleted from @to_dev
4027 * @addr_type: address type - 0 means type will be used from from_dev
4028 *
4029 * Deletes the addresses listed in the from device from the to device.
4030 *
4031 * The caller must hold the rtnl_mutex.
4032 */
4033int dev_addr_del_multiple(struct net_device *to_dev,
4034 struct net_device *from_dev,
4035 unsigned char addr_type)
4036{
4037 ASSERT_RTNL();
4038
4039 if (from_dev->addr_len != to_dev->addr_len)
4040 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00004041 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00004042 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00004043 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4044 return 0;
4045}
4046EXPORT_SYMBOL(dev_addr_del_multiple);
4047
Jiri Pirko31278e72009-06-17 01:12:19 +00004048/* multicast addresses handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00004049
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004050int __dev_addr_delete(struct dev_addr_list **list, int *count,
4051 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07004052{
4053 struct dev_addr_list *da;
4054
4055 for (; (da = *list) != NULL; list = &da->next) {
4056 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4057 alen == da->da_addrlen) {
4058 if (glbl) {
4059 int old_glbl = da->da_gusers;
4060 da->da_gusers = 0;
4061 if (old_glbl == 0)
4062 break;
4063 }
4064 if (--da->da_users)
4065 return 0;
4066
4067 *list = da->next;
4068 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004069 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07004070 return 0;
4071 }
4072 }
4073 return -ENOENT;
4074}
4075
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004076int __dev_addr_add(struct dev_addr_list **list, int *count,
4077 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07004078{
4079 struct dev_addr_list *da;
4080
4081 for (da = *list; da != NULL; da = da->next) {
4082 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4083 da->da_addrlen == alen) {
4084 if (glbl) {
4085 int old_glbl = da->da_gusers;
4086 da->da_gusers = 1;
4087 if (old_glbl)
4088 return 0;
4089 }
4090 da->da_users++;
4091 return 0;
4092 }
4093 }
4094
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08004095 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07004096 if (da == NULL)
4097 return -ENOMEM;
4098 memcpy(da->da_addr, addr, alen);
4099 da->da_addrlen = alen;
4100 da->da_users = 1;
4101 da->da_gusers = glbl ? 1 : 0;
4102 da->next = *list;
4103 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004104 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07004105 return 0;
4106}
4107
Patrick McHardy4417da62007-06-27 01:28:10 -07004108/**
4109 * dev_unicast_delete - Release secondary unicast address.
4110 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004111 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07004112 *
4113 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004114 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07004115 *
4116 * The caller must hold the rtnl_mutex.
4117 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00004118int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07004119{
4120 int err;
4121
4122 ASSERT_RTNL();
4123
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004124 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004125 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
4126 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004127 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004128 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004129 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004130 return err;
4131}
4132EXPORT_SYMBOL(dev_unicast_delete);
4133
4134/**
4135 * dev_unicast_add - add a secondary unicast address
4136 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07004137 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07004138 *
4139 * Add a secondary unicast address to the device or increase
4140 * the reference count if it already exists.
4141 *
4142 * The caller must hold the rtnl_mutex.
4143 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00004144int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07004145{
4146 int err;
4147
4148 ASSERT_RTNL();
4149
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004150 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004151 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4152 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004153 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004154 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004155 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004156 return err;
4157}
4158EXPORT_SYMBOL(dev_unicast_add);
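/*
 * Example (illustrative sketch, not from this file): registering an
 * extra unicast MAC a device should accept, e.g. from a hypothetical
 * virtual-function setup path; "extra_mac" is an assumption.
 *
 *	ASSERT_RTNL();
 *	err = dev_unicast_add(dev, extra_mac);
 *	...
 *	dev_unicast_delete(dev, extra_mac);
 */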
4159
Chris Leeche83a2ea2008-01-31 16:53:23 -08004160int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4161 struct dev_addr_list **from, int *from_count)
4162{
4163 struct dev_addr_list *da, *next;
4164 int err = 0;
4165
4166 da = *from;
4167 while (da != NULL) {
4168 next = da->next;
4169 if (!da->da_synced) {
4170 err = __dev_addr_add(to, to_count,
4171 da->da_addr, da->da_addrlen, 0);
4172 if (err < 0)
4173 break;
4174 da->da_synced = 1;
4175 da->da_users++;
4176 } else if (da->da_users == 1) {
4177 __dev_addr_delete(to, to_count,
4178 da->da_addr, da->da_addrlen, 0);
4179 __dev_addr_delete(from, from_count,
4180 da->da_addr, da->da_addrlen, 0);
4181 }
4182 da = next;
4183 }
4184 return err;
4185}
Johannes Bergc4029082009-06-17 17:43:30 +02004186EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004187
4188void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4189 struct dev_addr_list **from, int *from_count)
4190{
4191 struct dev_addr_list *da, *next;
4192
4193 da = *from;
4194 while (da != NULL) {
4195 next = da->next;
4196 if (da->da_synced) {
4197 __dev_addr_delete(to, to_count,
4198 da->da_addr, da->da_addrlen, 0);
4199 da->da_synced = 0;
4200 __dev_addr_delete(from, from_count,
4201 da->da_addr, da->da_addrlen, 0);
4202 }
4203 da = next;
4204 }
4205}
Johannes Bergc4029082009-06-17 17:43:30 +02004206EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004207
4208/**
4209 * dev_unicast_sync - Synchronize device's unicast list to another device
4210 * @to: destination device
4211 * @from: source device
4212 *
4213 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004214 * addresses that have no users left. The source device must be
 4215	 * locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08004216 *
4217 * This function is intended to be called from the dev->set_rx_mode
4218 * function of layered software devices.
4219 */
4220int dev_unicast_sync(struct net_device *to, struct net_device *from)
4221{
4222 int err = 0;
4223
Jiri Pirkoccffad252009-05-22 23:22:17 +00004224 if (to->addr_len != from->addr_len)
4225 return -EINVAL;
4226
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004227 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004228 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004229 if (!err)
4230 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004231 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004232 return err;
4233}
4234EXPORT_SYMBOL(dev_unicast_sync);
4235
4236/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004237 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004238 * @to: destination device
4239 * @from: source device
4240 *
4241 * Remove all addresses that were added to the destination device by
4242 * dev_unicast_sync(). This function is intended to be called from the
4243 * dev->stop function of layered software devices.
4244 */
4245void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4246{
Jiri Pirkoccffad252009-05-22 23:22:17 +00004247 if (to->addr_len != from->addr_len)
4248 return;
4249
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004250 netif_addr_lock_bh(from);
4251 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004252 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004253 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004254 netif_addr_unlock(to);
4255 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004256}
4257EXPORT_SYMBOL(dev_unicast_unsync);
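/*
 * Example (illustrative sketch): how a layered device would use the
 * sync/unsync pair, in the spirit of the VLAN code; the names
 * upper_set_rx_mode, upper_stop and upper_get_lower are assumptions.
 *
 *	static void upper_set_rx_mode(struct net_device *dev)
 *	{
 *		dev_unicast_sync(upper_get_lower(dev), dev);
 *	}
 *
 *	static int upper_stop(struct net_device *dev)
 *	{
 *		dev_unicast_unsync(upper_get_lower(dev), dev);
 *		return 0;
 *	}
 */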
4258
Jiri Pirkoccffad252009-05-22 23:22:17 +00004259static void dev_unicast_flush(struct net_device *dev)
4260{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004261 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004262 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004263 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004264}
4265
4266static void dev_unicast_init(struct net_device *dev)
4267{
Jiri Pirko31278e72009-06-17 01:12:19 +00004268 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004269}
4270
4271
Denis Cheng12972622007-07-18 02:12:56 -07004272static void __dev_addr_discard(struct dev_addr_list **list)
4273{
4274 struct dev_addr_list *tmp;
4275
4276 while (*list != NULL) {
4277 tmp = *list;
4278 *list = tmp->next;
4279 if (tmp->da_users > tmp->da_gusers)
4280 printk("__dev_addr_discard: address leakage! "
4281 "da_users=%d\n", tmp->da_users);
4282 kfree(tmp);
4283 }
4284}
4285
Denis Cheng26cc2522007-07-18 02:12:03 -07004286static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004287{
David S. Millerb9e40852008-07-15 00:15:08 -07004288 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004289
Denis Cheng456ad752007-07-18 02:10:54 -07004290 __dev_addr_discard(&dev->mc_list);
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004291 netdev_mc_count(dev) = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004292
David S. Millerb9e40852008-07-15 00:15:08 -07004293 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004294}
4295
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004296/**
4297 * dev_get_flags - get flags reported to userspace
4298 * @dev: device
4299 *
4300 * Get the combination of flag bits exported through APIs to userspace.
4301 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302unsigned dev_get_flags(const struct net_device *dev)
4303{
4304 unsigned flags;
4305
4306 flags = (dev->flags & ~(IFF_PROMISC |
4307 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004308 IFF_RUNNING |
4309 IFF_LOWER_UP |
4310 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 (dev->gflags & (IFF_PROMISC |
4312 IFF_ALLMULTI));
4313
Stefan Rompfb00055a2006-03-20 17:09:11 -08004314 if (netif_running(dev)) {
4315 if (netif_oper_up(dev))
4316 flags |= IFF_RUNNING;
4317 if (netif_carrier_ok(dev))
4318 flags |= IFF_LOWER_UP;
4319 if (netif_dormant(dev))
4320 flags |= IFF_DORMANT;
4321 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322
4323 return flags;
4324}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004325EXPORT_SYMBOL(dev_get_flags);
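/*
 * Example (illustrative sketch): reading the userspace-visible flags and
 * testing operational state the way an SIOCGIFFLAGS caller would.
 *
 *	unsigned flags = dev_get_flags(dev);
 *
 *	if ((flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
 *		printk(KERN_DEBUG "%s is up and operational\n", dev->name);
 */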
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326
Patrick McHardybd380812010-02-26 06:34:53 +00004327int __dev_change_flags(struct net_device *dev, unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329 int old_flags = dev->flags;
Patrick McHardybd380812010-02-26 06:34:53 +00004330 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331
Patrick McHardy24023452007-07-14 18:51:31 -07004332 ASSERT_RTNL();
4333
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334 /*
4335 * Set the flags on our device.
4336 */
4337
4338 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4339 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4340 IFF_AUTOMEDIA)) |
4341 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4342 IFF_ALLMULTI));
4343
4344 /*
4345 * Load in the correct multicast list now the flags have changed.
4346 */
4347
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004348 if ((old_flags ^ flags) & IFF_MULTICAST)
4349 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004350
Patrick McHardy4417da62007-06-27 01:28:10 -07004351 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352
4353 /*
 4354	 * Have we downed the interface? We handle IFF_UP ourselves
4355 * according to user attempts to set it, rather than blindly
4356 * setting it.
4357 */
4358
4359 ret = 0;
4360 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
Patrick McHardybd380812010-02-26 06:34:53 +00004361 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362
4363 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004364 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004365 }
4366
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004368 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4369
Linus Torvalds1da177e2005-04-16 15:20:36 -07004370 dev->gflags ^= IFF_PROMISC;
4371 dev_set_promiscuity(dev, inc);
4372 }
4373
4374 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 4375	   is important. Some (broken) drivers set IFF_PROMISC when
 4376	   IFF_ALLMULTI is requested, without asking us and without reporting.
4377 */
4378 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004379 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4380
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381 dev->gflags ^= IFF_ALLMULTI;
4382 dev_set_allmulti(dev, inc);
4383 }
4384
Patrick McHardybd380812010-02-26 06:34:53 +00004385 return ret;
4386}
4387
4388void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4389{
4390 unsigned int changes = dev->flags ^ old_flags;
4391
4392 if (changes & IFF_UP) {
4393 if (dev->flags & IFF_UP)
4394 call_netdevice_notifiers(NETDEV_UP, dev);
4395 else
4396 call_netdevice_notifiers(NETDEV_DOWN, dev);
4397 }
4398
4399 if (dev->flags & IFF_UP &&
4400 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4401 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4402}
4403
4404/**
4405 * dev_change_flags - change device settings
4406 * @dev: device
4407 * @flags: device state flags
4408 *
4409 * Change settings on device based state flags. The flags are
4410 * in the userspace exported format.
4411 */
4412int dev_change_flags(struct net_device *dev, unsigned flags)
4413{
4414 int ret, changes;
4415 int old_flags = dev->flags;
4416
4417 ret = __dev_change_flags(dev, flags);
4418 if (ret < 0)
4419 return ret;
4420
4421 changes = old_flags ^ dev->flags;
Thomas Graf7c355f52007-06-05 16:03:03 -07004422 if (changes)
4423 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004424
Patrick McHardybd380812010-02-26 06:34:53 +00004425 __dev_notify_flags(dev, old_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426 return ret;
4427}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004428EXPORT_SYMBOL(dev_change_flags);
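/*
 * Example (illustrative sketch): enabling promiscuous mode through the
 * same path SIOCSIFFLAGS takes, so rx-mode updates, notifiers and the
 * rtnetlink message all fire.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_PROMISC);
 *	rtnl_unlock();
 */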
Linus Torvalds1da177e2005-04-16 15:20:36 -07004429
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004430/**
4431 * dev_set_mtu - Change maximum transfer unit
4432 * @dev: device
4433 * @new_mtu: new transfer unit
4434 *
4435 * Change the maximum transfer size of the network device.
4436 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437int dev_set_mtu(struct net_device *dev, int new_mtu)
4438{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004439 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440 int err;
4441
4442 if (new_mtu == dev->mtu)
4443 return 0;
4444
 4445	/* MTU must not be negative. */
4446 if (new_mtu < 0)
4447 return -EINVAL;
4448
4449 if (!netif_device_present(dev))
4450 return -ENODEV;
4451
4452 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004453 if (ops->ndo_change_mtu)
4454 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455 else
4456 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004457
Linus Torvalds1da177e2005-04-16 15:20:36 -07004458 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004459 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460 return err;
4461}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004462EXPORT_SYMBOL(dev_set_mtu);
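/*
 * Example (illustrative sketch): shrinking the MTU of a hypothetical
 * tunnel device to leave room for encapsulation; the 50-byte overhead
 * is an assumption.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, ETH_DATA_LEN - 50);
 *	rtnl_unlock();
 *	if (err)
 *		printk(KERN_WARNING "could not shrink MTU: %d\n", err);
 */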
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004464/**
4465 * dev_set_mac_address - Change Media Access Control Address
4466 * @dev: device
4467 * @sa: new address
4468 *
4469 * Change the hardware (MAC) address of the device
4470 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4472{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004473 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004474 int err;
4475
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004476 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477 return -EOPNOTSUPP;
4478 if (sa->sa_family != dev->type)
4479 return -EINVAL;
4480 if (!netif_device_present(dev))
4481 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004482 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004484 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485 return err;
4486}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004487EXPORT_SYMBOL(dev_set_mac_address);
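/*
 * Example (illustrative sketch): building the sockaddr this function
 * expects; "new_mac" (an ETH_ALEN-byte buffer) is an assumption.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, ETH_ALEN);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */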
Linus Torvalds1da177e2005-04-16 15:20:36 -07004488
4489/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00004490 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004492static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493{
4494 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00004495 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496
4497 if (!dev)
4498 return -ENODEV;
4499
4500 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004501 case SIOCGIFFLAGS: /* Get interface flags */
4502 ifr->ifr_flags = (short) dev_get_flags(dev);
4503 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004505 case SIOCGIFMETRIC: /* Get the metric on the interface
4506 (currently unused) */
4507 ifr->ifr_metric = 0;
4508 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004510 case SIOCGIFMTU: /* Get the MTU of a device */
4511 ifr->ifr_mtu = dev->mtu;
4512 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004514 case SIOCGIFHWADDR:
4515 if (!dev->addr_len)
4516 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4517 else
4518 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4519 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4520 ifr->ifr_hwaddr.sa_family = dev->type;
4521 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004522
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004523 case SIOCGIFSLAVE:
4524 err = -EINVAL;
4525 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004526
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004527 case SIOCGIFMAP:
4528 ifr->ifr_map.mem_start = dev->mem_start;
4529 ifr->ifr_map.mem_end = dev->mem_end;
4530 ifr->ifr_map.base_addr = dev->base_addr;
4531 ifr->ifr_map.irq = dev->irq;
4532 ifr->ifr_map.dma = dev->dma;
4533 ifr->ifr_map.port = dev->if_port;
4534 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004535
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004536 case SIOCGIFINDEX:
4537 ifr->ifr_ifindex = dev->ifindex;
4538 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004539
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004540 case SIOCGIFTXQLEN:
4541 ifr->ifr_qlen = dev->tx_queue_len;
4542 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004543
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004544 default:
4545 /* dev_ioctl() should ensure this case
4546 * is never reached
4547 */
4548 WARN_ON(1);
4549 err = -EINVAL;
4550 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004551
4552 }
4553 return err;
4554}
4555
4556/*
4557 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4558 */
4559static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4560{
4561 int err;
4562 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004563 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004564
4565 if (!dev)
4566 return -ENODEV;
4567
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004568 ops = dev->netdev_ops;
4569
Jeff Garzik14e3e072007-10-08 00:06:32 -07004570 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004571 case SIOCSIFFLAGS: /* Set interface flags */
4572 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004573
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004574 case SIOCSIFMETRIC: /* Set the metric on the interface
4575 (currently unused) */
4576 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004577
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004578 case SIOCSIFMTU: /* Set the MTU of a device */
4579 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004580
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004581 case SIOCSIFHWADDR:
4582 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004584 case SIOCSIFHWBROADCAST:
4585 if (ifr->ifr_hwaddr.sa_family != dev->type)
4586 return -EINVAL;
4587 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4588 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4589 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4590 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004592 case SIOCSIFMAP:
4593 if (ops->ndo_set_config) {
4594 if (!netif_device_present(dev))
4595 return -ENODEV;
4596 return ops->ndo_set_config(dev, &ifr->ifr_map);
4597 }
4598 return -EOPNOTSUPP;
4599
4600 case SIOCADDMULTI:
4601 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4602 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4603 return -EINVAL;
4604 if (!netif_device_present(dev))
4605 return -ENODEV;
4606 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4607 dev->addr_len, 1);
4608
4609 case SIOCDELMULTI:
4610 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4611 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4612 return -EINVAL;
4613 if (!netif_device_present(dev))
4614 return -ENODEV;
4615 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4616 dev->addr_len, 1);
4617
4618 case SIOCSIFTXQLEN:
4619 if (ifr->ifr_qlen < 0)
4620 return -EINVAL;
4621 dev->tx_queue_len = ifr->ifr_qlen;
4622 return 0;
4623
4624 case SIOCSIFNAME:
4625 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4626 return dev_change_name(dev, ifr->ifr_newname);
4627
4628 /*
4629 * Unknown or private ioctl
4630 */
4631 default:
4632 if ((cmd >= SIOCDEVPRIVATE &&
4633 cmd <= SIOCDEVPRIVATE + 15) ||
4634 cmd == SIOCBONDENSLAVE ||
4635 cmd == SIOCBONDRELEASE ||
4636 cmd == SIOCBONDSETHWADDR ||
4637 cmd == SIOCBONDSLAVEINFOQUERY ||
4638 cmd == SIOCBONDINFOQUERY ||
4639 cmd == SIOCBONDCHANGEACTIVE ||
4640 cmd == SIOCGMIIPHY ||
4641 cmd == SIOCGMIIREG ||
4642 cmd == SIOCSMIIREG ||
4643 cmd == SIOCBRADDIF ||
4644 cmd == SIOCBRDELIF ||
4645 cmd == SIOCSHWTSTAMP ||
4646 cmd == SIOCWANDEV) {
4647 err = -EOPNOTSUPP;
4648 if (ops->ndo_do_ioctl) {
4649 if (netif_device_present(dev))
4650 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4651 else
4652 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004654 } else
4655 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656
4657 }
4658 return err;
4659}
4660
4661/*
4662 * This function handles all "interface"-type I/O control requests. The actual
4663 * 'doing' part of this is dev_ifsioc above.
4664 */
4665
4666/**
4667 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004668 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669 * @cmd: command to issue
4670 * @arg: pointer to a struct ifreq in user space
4671 *
4672 * Issue ioctl functions to devices. This is normally called by the
4673 * user space syscall interfaces but can sometimes be useful for
4674 * other purposes. The return value is the return from the syscall if
4675 * positive or a negative errno code on error.
4676 */
4677
Eric W. Biederman881d9662007-09-17 11:56:21 -07004678int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679{
4680 struct ifreq ifr;
4681 int ret;
4682 char *colon;
4683
4684 /* One special case: SIOCGIFCONF takes ifconf argument
4685 and requires shared lock, because it sleeps writing
4686 to user space.
4687 */
4688
4689 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004690 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004691 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004692 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004693 return ret;
4694 }
4695 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004696 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697
4698 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4699 return -EFAULT;
4700
4701 ifr.ifr_name[IFNAMSIZ-1] = 0;
4702
4703 colon = strchr(ifr.ifr_name, ':');
4704 if (colon)
4705 *colon = 0;
4706
4707 /*
4708 * See which interface the caller is talking about.
4709 */
4710
4711 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004712 /*
4713 * These ioctl calls:
4714 * - can be done by all.
4715 * - atomic and do not require locking.
4716 * - return a value
4717 */
4718 case SIOCGIFFLAGS:
4719 case SIOCGIFMETRIC:
4720 case SIOCGIFMTU:
4721 case SIOCGIFHWADDR:
4722 case SIOCGIFSLAVE:
4723 case SIOCGIFMAP:
4724 case SIOCGIFINDEX:
4725 case SIOCGIFTXQLEN:
4726 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004727 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004728 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004729 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004730 if (!ret) {
4731 if (colon)
4732 *colon = ':';
4733 if (copy_to_user(arg, &ifr,
4734 sizeof(struct ifreq)))
4735 ret = -EFAULT;
4736 }
4737 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004739 case SIOCETHTOOL:
4740 dev_load(net, ifr.ifr_name);
4741 rtnl_lock();
4742 ret = dev_ethtool(net, &ifr);
4743 rtnl_unlock();
4744 if (!ret) {
4745 if (colon)
4746 *colon = ':';
4747 if (copy_to_user(arg, &ifr,
4748 sizeof(struct ifreq)))
4749 ret = -EFAULT;
4750 }
4751 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004752
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004753 /*
4754 * These ioctl calls:
4755 * - require superuser power.
4756 * - require strict serialization.
4757 * - return a value
4758 */
4759 case SIOCGMIIPHY:
4760 case SIOCGMIIREG:
4761 case SIOCSIFNAME:
4762 if (!capable(CAP_NET_ADMIN))
4763 return -EPERM;
4764 dev_load(net, ifr.ifr_name);
4765 rtnl_lock();
4766 ret = dev_ifsioc(net, &ifr, cmd);
4767 rtnl_unlock();
4768 if (!ret) {
4769 if (colon)
4770 *colon = ':';
4771 if (copy_to_user(arg, &ifr,
4772 sizeof(struct ifreq)))
4773 ret = -EFAULT;
4774 }
4775 return ret;
4776
4777 /*
4778 * These ioctl calls:
4779 * - require superuser power.
4780 * - require strict serialization.
4781 * - do not return a value
4782 */
4783 case SIOCSIFFLAGS:
4784 case SIOCSIFMETRIC:
4785 case SIOCSIFMTU:
4786 case SIOCSIFMAP:
4787 case SIOCSIFHWADDR:
4788 case SIOCSIFSLAVE:
4789 case SIOCADDMULTI:
4790 case SIOCDELMULTI:
4791 case SIOCSIFHWBROADCAST:
4792 case SIOCSIFTXQLEN:
4793 case SIOCSMIIREG:
4794 case SIOCBONDENSLAVE:
4795 case SIOCBONDRELEASE:
4796 case SIOCBONDSETHWADDR:
4797 case SIOCBONDCHANGEACTIVE:
4798 case SIOCBRADDIF:
4799 case SIOCBRDELIF:
4800 case SIOCSHWTSTAMP:
4801 if (!capable(CAP_NET_ADMIN))
4802 return -EPERM;
4803 /* fall through */
4804 case SIOCBONDSLAVEINFOQUERY:
4805 case SIOCBONDINFOQUERY:
4806 dev_load(net, ifr.ifr_name);
4807 rtnl_lock();
4808 ret = dev_ifsioc(net, &ifr, cmd);
4809 rtnl_unlock();
4810 return ret;
4811
4812 case SIOCGIFMEM:
4813 /* Get the per device memory space. We can add this but
4814 * currently do not support it */
4815 case SIOCSIFMEM:
4816 /* Set the per device memory buffer space.
4817 * Not applicable in our case */
4818 case SIOCSIFLINK:
4819 return -EINVAL;
4820
4821 /*
4822 * Unknown or private ioctl.
4823 */
4824 default:
4825 if (cmd == SIOCWANDEV ||
4826 (cmd >= SIOCDEVPRIVATE &&
4827 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004828 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004830 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004832 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004833 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004834 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004835 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004836 }
4837 /* Take care of Wireless Extensions */
4838 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4839 return wext_handle_ioctl(net, &ifr, cmd, arg);
4840 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004841 }
4842}
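/*
 * Example (illustrative sketch, userspace side): the kind of caller this
 * entry point serves - querying an interface MTU via SIOCGIFMTU; the
 * interface name "eth0" is an assumption and error handling is elided.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGIFMTU, &ifr);
 *	printf("mtu=%d\n", ifr.ifr_mtu);
 */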
4843
4844
4845/**
4846 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004847 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004848 *
4849 * Returns a suitable unique value for a new device interface
4850 * number. The caller must hold the rtnl semaphore or the
4851 * dev_base_lock to be sure it remains unique.
4852 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004853static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004854{
4855 static int ifindex;
4856 for (;;) {
4857 if (++ifindex <= 0)
4858 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004859 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004860 return ifindex;
4861 }
4862}
4863
Linus Torvalds1da177e2005-04-16 15:20:36 -07004864/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004865static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004866
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004867static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004868{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004870}
4871
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004872static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004873{
Krishna Kumare93737b2009-12-08 22:26:02 +00004874 struct net_device *dev, *tmp;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004875
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004876 BUG_ON(dev_boot_phase);
4877 ASSERT_RTNL();
4878
Krishna Kumare93737b2009-12-08 22:26:02 +00004879 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004880 /* Some devices call without registering
Krishna Kumare93737b2009-12-08 22:26:02 +00004881 * for initialization unwind. Remove those
4882 * devices and proceed with the remaining.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004883 */
4884 if (dev->reg_state == NETREG_UNINITIALIZED) {
4885 pr_debug("unregister_netdevice: device %s/%p never "
4886 "was registered\n", dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004887
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004888 WARN_ON(1);
Krishna Kumare93737b2009-12-08 22:26:02 +00004889 list_del(&dev->unreg_list);
4890 continue;
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004891 }
4892
4893 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4894
4895 /* If device is running, close it first. */
4896 dev_close(dev);
4897
4898 /* And unlink it from device chain. */
4899 unlist_netdevice(dev);
4900
4901 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004902 }
4903
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004904 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004905
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004906 list_for_each_entry(dev, head, unreg_list) {
4907 /* Shutdown queueing discipline. */
4908 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004909
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004910
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004911 /* Notify protocols, that we are about to destroy
4912 this device. They should clean all the things.
4913 */
4914 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4915
Patrick McHardya2835762010-02-26 06:34:51 +00004916 if (!dev->rtnl_link_ops ||
4917 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4918 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4919
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004920 /*
4921 * Flush the unicast and multicast chains
4922 */
4923 dev_unicast_flush(dev);
4924 dev_addr_discard(dev);
4925
4926 if (dev->netdev_ops->ndo_uninit)
4927 dev->netdev_ops->ndo_uninit(dev);
4928
4929 /* Notifier chain MUST detach us from master device. */
4930 WARN_ON(dev->master);
4931
4932 /* Remove entries from kobject tree */
4933 netdev_unregister_kobject(dev);
4934 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004935
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004936 /* Process any work delayed until the end of the batch */
stephen hemmingere5e26d72010-02-24 14:01:38 +00004937 dev = list_first_entry(head, struct net_device, unreg_list);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004938 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4939
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004940 synchronize_net();
4941
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00004942 list_for_each_entry(dev, head, unreg_list)
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004943 dev_put(dev);
4944}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004945
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004946static void rollback_registered(struct net_device *dev)
4947{
4948 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004949
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004950 list_add(&dev->unreg_list, &single);
4951 rollback_registered_many(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004952}
4953
David S. Millere8a04642008-07-17 00:34:19 -07004954static void __netdev_init_queue_locks_one(struct net_device *dev,
4955 struct netdev_queue *dev_queue,
4956 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004957{
4958 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004959 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004960 dev_queue->xmit_lock_owner = -1;
4961}
4962
4963static void netdev_init_queue_locks(struct net_device *dev)
4964{
David S. Millere8a04642008-07-17 00:34:19 -07004965 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4966 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004967}
4968
Herbert Xub63365a2008-10-23 01:11:29 -07004969unsigned long netdev_fix_features(unsigned long features, const char *name)
4970{
4971 /* Fix illegal SG+CSUM combinations. */
4972 if ((features & NETIF_F_SG) &&
4973 !(features & NETIF_F_ALL_CSUM)) {
4974 if (name)
4975 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4976 "checksum feature.\n", name);
4977 features &= ~NETIF_F_SG;
4978 }
4979
4980 /* TSO requires that SG is present as well. */
4981 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4982 if (name)
4983 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4984 "SG feature.\n", name);
4985 features &= ~NETIF_F_TSO;
4986 }
4987
4988 if (features & NETIF_F_UFO) {
4989 if (!(features & NETIF_F_GEN_CSUM)) {
4990 if (name)
4991 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4992 "since no NETIF_F_HW_CSUM feature.\n",
4993 name);
4994 features &= ~NETIF_F_UFO;
4995 }
4996
4997 if (!(features & NETIF_F_SG)) {
4998 if (name)
4999 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
5000 "since no NETIF_F_SG feature.\n", name);
5001 features &= ~NETIF_F_UFO;
5002 }
5003 }
5004
5005 return features;
5006}
5007EXPORT_SYMBOL(netdev_fix_features);
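/*
 * Example (illustrative sketch): sanitizing a hypothetical feature mask
 * before applying it. Here NETIF_F_SG would be dropped for lack of any
 * NETIF_F_ALL_CSUM bit, and NETIF_F_TSO would then follow because it
 * requires SG.
 *
 *	unsigned long wanted = NETIF_F_SG | NETIF_F_TSO;
 *
 *	dev->features = netdev_fix_features(wanted, dev->name);
 */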
5008
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009/**
Patrick Mullaneyfc4a7482009-12-03 15:59:22 -08005010 * netif_stacked_transfer_operstate - transfer operstate
5011 * @rootdev: the root or lower level device to transfer state from
5012 * @dev: the device to transfer operstate to
5013 *
5014 * Transfer operational state from root to device. This is normally
5015 * called when a stacking relationship exists between the root
 5016	 * device and the device (a leaf device).
5017 */
5018void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5019 struct net_device *dev)
5020{
5021 if (rootdev->operstate == IF_OPER_DORMANT)
5022 netif_dormant_on(dev);
5023 else
5024 netif_dormant_off(dev);
5025
5026 if (netif_carrier_ok(rootdev)) {
5027 if (!netif_carrier_ok(dev))
5028 netif_carrier_on(dev);
5029 } else {
5030 if (netif_carrier_ok(dev))
5031 netif_carrier_off(dev);
5032 }
5033}
5034EXPORT_SYMBOL(netif_stacked_transfer_operstate);
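/*
 * Example (illustrative sketch): a stacked driver mirroring its lower
 * device's state from a netdevice notifier; the lower_dev/upper_dev
 * lookup is an assumption.
 *
 *	case NETDEV_CHANGE:
 *		netif_stacked_transfer_operstate(lower_dev, upper_dev);
 *		break;
 */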
5035
5036/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005037 * register_netdevice - register a network device
5038 * @dev: device to register
5039 *
5040 * Take a completed network device structure and add it to the kernel
5041 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5042 * chain. 0 is returned on success. A negative errno code is returned
5043 * on a failure to set up the device, or if the name is a duplicate.
5044 *
5045 * Callers must hold the rtnl semaphore. You may want
5046 * register_netdev() instead of this.
5047 *
5048 * BUGS:
5049 * The locking appears insufficient to guarantee two parallel registers
5050 * will not get the same name.
5051 */
5052
5053int register_netdevice(struct net_device *dev)
5054{
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005056 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057
5058 BUG_ON(dev_boot_phase);
5059 ASSERT_RTNL();
5060
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005061 might_sleep();
5062
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063 /* When net_device's are persistent, this will be fatal. */
5064 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005065 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066
David S. Millerf1f28aa2008-07-15 00:08:33 -07005067 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07005068 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07005069 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005070
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071 dev->iflink = -1;
5072
5073 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005074 if (dev->netdev_ops->ndo_init) {
5075 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076 if (ret) {
5077 if (ret > 0)
5078 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08005079 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080 }
5081 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005082
Octavian Purdilad9031022009-11-18 02:36:59 +00005083 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
5084 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005085 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005086
Eric W. Biederman881d9662007-09-17 11:56:21 -07005087 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005088 if (dev->iflink == -1)
5089 dev->iflink = dev->ifindex;
5090
Stephen Hemmingerd212f872007-06-27 00:47:37 -07005091 /* Fix illegal checksum combinations */
5092 if ((dev->features & NETIF_F_HW_CSUM) &&
5093 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5094 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5095 dev->name);
5096 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5097 }
5098
5099 if ((dev->features & NETIF_F_NO_CSUM) &&
5100 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5101 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5102 dev->name);
5103 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5104 }
5105
Herbert Xub63365a2008-10-23 01:11:29 -07005106 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005107
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07005108 /* Enable software GSO if SG is supported. */
5109 if (dev->features & NETIF_F_SG)
5110 dev->features |= NETIF_F_GSO;
5111
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005112 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00005113
5114 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5115 ret = notifier_to_errno(ret);
5116 if (ret)
5117 goto err_uninit;
5118
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005119 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005120 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005121 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005122 dev->reg_state = NETREG_REGISTERED;
5123
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124 /*
 5125	 * Default initial state at registration is that the
5126 * device is present.
5127 */
5128
5129 set_bit(__LINK_STATE_PRESENT, &dev->state);
5130
Linus Torvalds1da177e2005-04-16 15:20:36 -07005131 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005132 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005133 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005134
5135 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005136 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07005137 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005138 if (ret) {
5139 rollback_registered(dev);
5140 dev->reg_state = NETREG_UNREGISTERED;
5141 }
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005142 /*
5143 * Prevent userspace races by waiting until the network
5144 * device is fully setup before sending notifications.
5145 */
Patrick McHardya2835762010-02-26 06:34:51 +00005146 if (!dev->rtnl_link_ops ||
5147 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5148 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005149
5150out:
5151 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005152
5153err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08005154 if (dev->netdev_ops->ndo_uninit)
5155 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07005156 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005158EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005159
5160/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08005161 * init_dummy_netdev - init a dummy network device for NAPI
5162 * @dev: device to init
5163 *
 5164	 * This takes a network device structure and initializes the minimum
 5165	 * number of fields so it can be used to schedule NAPI polls without
5166 * registering a full blown interface. This is to be used by drivers
5167 * that need to tie several hardware interfaces to a single NAPI
5168 * poll scheduler due to HW limitations.
5169 */
5170int init_dummy_netdev(struct net_device *dev)
5171{
5172 /* Clear everything. Note we don't initialize spinlocks
 5173	 * as they aren't supposed to be taken by any of the
5174 * NAPI code and this dummy netdev is supposed to be
5175 * only ever used for NAPI polls
5176 */
5177 memset(dev, 0, sizeof(struct net_device));
5178
5179 /* make sure we BUG if trying to hit standard
5180 * register/unregister code path
5181 */
5182 dev->reg_state = NETREG_DUMMY;
5183
5184 /* initialize the ref count */
5185 atomic_set(&dev->refcnt, 1);
5186
5187 /* NAPI wants this */
5188 INIT_LIST_HEAD(&dev->napi_list);
5189
5190 /* a dummy interface is started by default */
5191 set_bit(__LINK_STATE_PRESENT, &dev->state);
5192 set_bit(__LINK_STATE_START, &dev->state);
5193
5194 return 0;
5195}
5196EXPORT_SYMBOL_GPL(init_dummy_netdev);
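/*
 * Example (illustrative sketch): tying two hardware interfaces to one
 * NAPI poll scheduler via a dummy netdev; the adapter layout and
 * my_poll are assumptions.
 *
 *	struct my_adapter {
 *		struct net_device napi_dev;
 *		struct napi_struct napi;
 *	};
 *
 *	init_dummy_netdev(&adapter->napi_dev);
 *	netif_napi_add(&adapter->napi_dev, &adapter->napi, my_poll, 64);
 *	napi_enable(&adapter->napi);
 */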
5197
5198
5199/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005200 * register_netdev - register a network device
5201 * @dev: device to register
5202 *
5203 * Take a completed network device structure and add it to the kernel
5204 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5205 * chain. 0 is returned on success. A negative errno code is returned
5206 * on a failure to set up the device, or if the name is a duplicate.
5207 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005208 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005209 * and expands the device name if you passed a format string to
5210 * alloc_netdev.
5211 */
5212int register_netdev(struct net_device *dev)
5213{
5214 int err;
5215
5216 rtnl_lock();
5217
5218 /*
5219 * If the name is a format string the caller wants us to do a
5220 * name allocation.
5221 */
5222 if (strchr(dev->name, '%')) {
5223 err = dev_alloc_name(dev, dev->name);
5224 if (err < 0)
5225 goto out;
5226 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005227
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228 err = register_netdevice(dev);
5229out:
5230 rtnl_unlock();
5231 return err;
5232}
5233EXPORT_SYMBOL(register_netdev);
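/*
 * Example (illustrative sketch): the usual allocate/register/unwind
 * sequence in a driver probe path; struct my_priv and my_setup are
 * assumptions.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "myeth%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */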
5234
5235/*
5236 * netdev_wait_allrefs - wait until all references are gone.
5237 *
5238 * This is called when unregistering network devices.
5239 *
5240 * Any protocol or device that holds a reference should register
 5241	 * for netdevice notification, and clean up and put back the
5242 * reference if they receive an UNREGISTER event.
5243 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005244 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245 */
5246static void netdev_wait_allrefs(struct net_device *dev)
5247{
5248 unsigned long rebroadcast_time, warning_time;
5249
Eric Dumazete014deb2009-11-17 05:59:21 +00005250 linkwatch_forget_dev(dev);
5251
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252 rebroadcast_time = warning_time = jiffies;
5253 while (atomic_read(&dev->refcnt) != 0) {
5254 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005255 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256
5257 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005258 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005259 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
Octavian Purdila395264d2009-11-16 13:49:35 +00005260 * should have already handled it the first time */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261
5262 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5263 &dev->state)) {
5264 /* We must not have linkwatch events
5265 * pending on unregister. If this
5266 * happens, we simply run the queue
5267 * unscheduled, resulting in a noop
5268 * for this device.
5269 */
5270 linkwatch_run_queue();
5271 }
5272
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005273 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005274
5275 rebroadcast_time = jiffies;
5276 }
5277
5278 msleep(250);
5279
5280 if (time_after(jiffies, warning_time + 10 * HZ)) {
5281 printk(KERN_EMERG "unregister_netdevice: "
5282 "waiting for %s to become free. Usage "
5283 "count = %d\n",
5284 dev->name, atomic_read(&dev->refcnt));
5285 warning_time = jiffies;
5286 }
5287 }
5288}
5289
5290/* The sequence is:
5291 *
5292 * rtnl_lock();
5293 * ...
5294 * register_netdevice(x1);
5295 * register_netdevice(x2);
5296 * ...
5297 * unregister_netdevice(y1);
5298 * unregister_netdevice(y2);
5299 * ...
5300 * rtnl_unlock();
5301 * free_netdev(y1);
5302 * free_netdev(y2);
5303 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005304 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005306 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005307 * without deadlocking with linkwatch via keventd.
5308 * 2) Since we run with the RTNL semaphore not held, we can sleep
5309 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005310 *
5311 * We must not return until all unregister events added during
5312 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005313 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005314void netdev_run_todo(void)
5315{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005316 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005317
Linus Torvalds1da177e2005-04-16 15:20:36 -07005318 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005319 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005320
5321 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005322
Linus Torvalds1da177e2005-04-16 15:20:36 -07005323 while (!list_empty(&list)) {
5324 struct net_device *dev
stephen hemmingere5e26d72010-02-24 14:01:38 +00005325 = list_first_entry(&list, struct net_device, todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005326 list_del(&dev->todo_list);
5327
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005328 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005329 printk(KERN_ERR "network todo '%s' but state %d\n",
5330 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005331 dump_stack();
5332 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005334
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005335 dev->reg_state = NETREG_UNREGISTERED;
5336
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005337 on_each_cpu(flush_backlog, dev, 1);
5338
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005339 netdev_wait_allrefs(dev);
5340
5341 /* paranoia */
5342 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005343 WARN_ON(dev->ip_ptr);
5344 WARN_ON(dev->ip6_ptr);
5345 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005346
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005347 if (dev->destructor)
5348 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005349
5350 /* Free network device */
5351 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005352 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005353}
5354
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005355/**
Eric Dumazetd83345a2009-11-16 03:36:51 +00005356 * dev_txq_stats_fold - fold tx_queues stats
5357 * @dev: device to get statistics from
5358 * @stats: struct net_device_stats to hold results
5359 */
5360void dev_txq_stats_fold(const struct net_device *dev,
5361 struct net_device_stats *stats)
5362{
5363 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5364 unsigned int i;
5365 struct netdev_queue *txq;
5366
5367 for (i = 0; i < dev->num_tx_queues; i++) {
5368 txq = netdev_get_tx_queue(dev, i);
5369 tx_bytes += txq->tx_bytes;
5370 tx_packets += txq->tx_packets;
5371 tx_dropped += txq->tx_dropped;
5372 }
5373 if (tx_bytes || tx_packets || tx_dropped) {
5374 stats->tx_bytes = tx_bytes;
5375 stats->tx_packets = tx_packets;
5376 stats->tx_dropped = tx_dropped;
5377 }
5378}
5379EXPORT_SYMBOL(dev_txq_stats_fold);
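/*
 * Example (illustrative sketch): a multiqueue driver folding per-queue
 * counters into the classic stats block from its ndo_get_stats hook;
 * my_get_stats is an assumption.
 *
 *	static struct net_device_stats *my_get_stats(struct net_device *dev)
 *	{
 *		dev_txq_stats_fold(dev, &dev->stats);
 *		return &dev->stats;
 *	}
 */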
5380
5381/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005382 * dev_get_stats - get network device statistics
5383 * @dev: device to get statistics from
5384 *
5385 * Get network statistics from device. The device driver may provide
5386 * its own method by setting dev->netdev_ops->get_stats; otherwise
5387 * the internal statistics structure is used.
5388 */
5389const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005390{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005391 const struct net_device_ops *ops = dev->netdev_ops;
5392
5393 if (ops->ndo_get_stats)
5394 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005395
Eric Dumazetd83345a2009-11-16 03:36:51 +00005396 dev_txq_stats_fold(dev, &dev->stats);
5397 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07005398}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005399EXPORT_SYMBOL(dev_get_stats);
Rusty Russellc45d2862007-03-28 14:29:08 -07005400
David S. Millerdc2b4842008-07-08 17:18:23 -07005401static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005402 struct netdev_queue *queue,
5403 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005404{
David S. Millerdc2b4842008-07-08 17:18:23 -07005405 queue->dev = dev;
5406}
5407
David S. Millerbb949fb2008-07-08 16:55:56 -07005408static void netdev_init_queues(struct net_device *dev)
5409{
David S. Millere8a04642008-07-17 00:34:19 -07005410 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5411 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005412 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005413}
5414
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005416 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005417 * @sizeof_priv: size of private data to allocate space for
5418 * @name: device name format string
5419 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005420 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005421 *
5422 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005423 * and performs basic initialization. Also allocates subquue structs
5424 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005426struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5427 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005428{
David S. Millere8a04642008-07-17 00:34:19 -07005429 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005430 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005431 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005432 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005433
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005434 BUG_ON(strlen(name) >= sizeof(dev->name));
5435
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005436 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005437 if (sizeof_priv) {
5438 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005439 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005440 alloc_size += sizeof_priv;
5441 }
5442 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005443 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005444
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005445 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005446 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005447 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005448 return NULL;
5449 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005450
Stephen Hemminger79439862008-07-21 13:28:44 -07005451 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005452 if (!tx) {
5453 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5454 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005455 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005456 }
5457
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005458 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005459 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005460
5461 if (dev_addr_init(dev))
5462 goto free_tx;
5463
Jiri Pirkoccffad252009-05-22 23:22:17 +00005464 dev_unicast_init(dev);
5465
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005466 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005467
David S. Millere8a04642008-07-17 00:34:19 -07005468 dev->_tx = tx;
5469 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005470 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005471
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005472 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005473
David S. Millerbb949fb2008-07-08 16:55:56 -07005474 netdev_init_queues(dev);
5475
Peter P Waskiewicz Jr15682bc2010-02-10 20:03:05 -08005476 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5477 dev->ethtool_ntuple_list.count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08005478 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005479 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00005480 INIT_LIST_HEAD(&dev->link_watch_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005481 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005482 setup(dev);
5483 strcpy(dev->name, name);
5484 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005485
5486free_tx:
5487 kfree(tx);
5488
5489free_p:
5490 kfree(p);
5491 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005492}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005493EXPORT_SYMBOL(alloc_netdev_mq);
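/*
 * Illustrative sketch, not part of the original file: a typical driver
 * allocation sequence using alloc_netdev_mq(). "struct my_priv" and
 * my_setup() are hypothetical driver-side names used only for the example.
 *
 *	static void my_setup(struct net_device *dev)
 *	{
 *		ether_setup(dev);
 *	}
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d",
 *			      my_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */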
Linus Torvalds1da177e2005-04-16 15:20:36 -07005494
5495/**
5496 * free_netdev - free network device
5497 * @dev: device
5498 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005499 * This function does the last stage of destroying an allocated device
5500 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005501 * If this is the last reference then it will be freed.
5502 */
5503void free_netdev(struct net_device *dev)
5504{
Herbert Xud565b0a2008-12-15 23:38:52 -08005505 struct napi_struct *p, *n;
5506
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005507 release_net(dev_net(dev));
5508
David S. Millere8a04642008-07-17 00:34:19 -07005509 kfree(dev->_tx);
5510
Jiri Pirkof001fde2009-05-05 02:48:28 +00005511 /* Flush device addresses */
5512 dev_addr_flush(dev);
5513
Peter P Waskiewicz Jr15682bc2010-02-10 20:03:05 -08005514 /* Clear ethtool n-tuple list */
5515 ethtool_ntuple_flush(dev);
5516
Herbert Xud565b0a2008-12-15 23:38:52 -08005517 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5518 netif_napi_del(p);
5519
Stephen Hemminger3041a062006-05-26 13:25:24 -07005520 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005521 if (dev->reg_state == NETREG_UNINITIALIZED) {
5522 kfree((char *)dev - dev->padded);
5523 return;
5524 }
5525
5526 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5527 dev->reg_state = NETREG_RELEASED;
5528
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005529 /* will free via device release */
5530 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005531}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005532EXPORT_SYMBOL(free_netdev);
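/*
 * Illustrative sketch (hedged, not from this file): free_netdev() pairs
 * with alloc_netdev_mq(). A device that was successfully registered must
 * be unregistered first:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 *
 * If register_netdev() was never called or failed, free_netdev() alone is
 * enough; that is the NETREG_UNINITIALIZED case handled above.
 */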
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005533
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005534/**
5535 * synchronize_net - Synchronize with packet receive processing
5536 *
5537 * Wait for packets currently being received to be done.
5538 * Does not block later packets from starting.
5539 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005540void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005541{
5542 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005543 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005544}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005545EXPORT_SYMBOL(synchronize_net);
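/*
 * Illustrative sketch (assumed caller, not from this file): code that
 * unhooks a packet handler typically calls synchronize_net() before
 * freeing the handler's state, so no CPU is still inside the old
 * receive path. "my_ptype" and "my_state" are hypothetical:
 *
 *	dev_remove_pack(&my_ptype);
 *	synchronize_net();
 *	kfree(my_state);
 */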
Linus Torvalds1da177e2005-04-16 15:20:36 -07005546
5547/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005548 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005549 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005550 * @head: list
Jaswinder Singh Rajput6ebfbc02009-11-22 20:43:13 -08005551 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005552 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005553 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005554 * If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005555 *
5556 * Callers must hold the rtnl semaphore. You may want
5557 * unregister_netdev() instead of this.
5558 */
5559
Eric Dumazet44a08732009-10-27 07:03:04 +00005560void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005561{
Herbert Xua6620712007-12-12 19:21:56 -08005562 ASSERT_RTNL();
5563
Eric Dumazet44a08732009-10-27 07:03:04 +00005564 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005565 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005566 } else {
5567 rollback_registered(dev);
5568 /* Finish processing unregister after unlock */
5569 net_set_todo(dev);
5570 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005571}
Eric Dumazet44a08732009-10-27 07:03:04 +00005572EXPORT_SYMBOL(unregister_netdevice_queue);
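/*
 * Illustrative sketch: a hypothetical rtnl_link_ops->dellink implementation
 * queueing the device on the caller-provided list; the actual teardown then
 * happens in one batch via unregister_netdevice_many() below:
 *
 *	static void my_dellink(struct net_device *dev, struct list_head *head)
 *	{
 *		unregister_netdevice_queue(dev, head);
 *	}
 */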
Linus Torvalds1da177e2005-04-16 15:20:36 -07005573
5574/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005575 * unregister_netdevice_many - unregister many devices
5576 * @head: list of devices
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005577 */
5578void unregister_netdevice_many(struct list_head *head)
5579{
5580 struct net_device *dev;
5581
5582 if (!list_empty(head)) {
5583 rollback_registered_many(head);
5584 list_for_each_entry(dev, head, unreg_list)
5585 net_set_todo(dev);
5586 }
5587}
Eric Dumazet63c80992009-10-27 07:06:49 +00005588EXPORT_SYMBOL(unregister_netdevice_many);
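/*
 * Illustrative sketch of the queue-then-flush pattern (dev1/dev2 are
 * hypothetical devices; rtnl must be held, as for unregister_netdevice):
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */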
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005589
5590/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005591 * unregister_netdev - remove device from the kernel
5592 * @dev: device
5593 *
5594 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005595 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005596 *
5597 * This is just a wrapper for unregister_netdevice that takes
5598 * the rtnl semaphore. In general you want to use this and not
5599 * unregister_netdevice.
5600 */
5601void unregister_netdev(struct net_device *dev)
5602{
5603 rtnl_lock();
5604 unregister_netdevice(dev);
5605 rtnl_unlock();
5606}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005607EXPORT_SYMBOL(unregister_netdev);
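/*
 * Illustrative sketch: the common module-exit path of a driver ("my_dev"
 * is a hypothetical module-global device pointer):
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		unregister_netdev(my_dev);
 *		free_netdev(my_dev);
 *	}
 */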
5608
Eric W. Biedermance286d32007-09-12 13:53:49 +02005609/**
5610 * dev_change_net_namespace - move device to different nethost namespace
5611 * @dev: device
5612 * @net: network namespace
5613 * @pat: If not NULL name pattern to try if the current device name
5614 * is already taken in the destination network namespace.
5615 *
5616 * This function shuts down a device interface and moves it
5617 * to a new network namespace. On success 0 is returned, on
 5618 * a failure a negative errno code is returned.
5619 *
5620 * Callers must hold the rtnl semaphore.
5621 */
5622
5623int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5624{
Eric W. Biedermance286d32007-09-12 13:53:49 +02005625 int err;
5626
5627 ASSERT_RTNL();
5628
5629 /* Don't allow namespace local devices to be moved. */
5630 err = -EINVAL;
5631 if (dev->features & NETIF_F_NETNS_LOCAL)
5632 goto out;
5633
Eric W. Biederman38918452008-10-27 17:51:47 -07005634#ifdef CONFIG_SYSFS
5635 /* Don't allow real devices to be moved when sysfs
5636 * is enabled.
5637 */
5638 err = -EINVAL;
5639 if (dev->dev.parent)
5640 goto out;
5641#endif
5642
Eric W. Biedermance286d32007-09-12 13:53:49 +02005643 /* Ensure the device has been registered */
5644 err = -EINVAL;
5645 if (dev->reg_state != NETREG_REGISTERED)
5646 goto out;
5647
 5648 /* Get out if there is nothing to do */
5649 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005650 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005651 goto out;
5652
5653 /* Pick the destination device name, and ensure
5654 * we can use it in the destination network namespace.
5655 */
5656 err = -EEXIST;
Octavian Purdilad9031022009-11-18 02:36:59 +00005657 if (__dev_get_by_name(net, dev->name)) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005658 /* We get here if we can't use the current device name */
5659 if (!pat)
5660 goto out;
Octavian Purdilad9031022009-11-18 02:36:59 +00005661 if (dev_get_valid_name(net, pat, dev->name, 1))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005662 goto out;
5663 }
5664
5665 /*
 5666 * And now a mini version of register_netdevice and unregister_netdevice.
5667 */
5668
5669 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005670 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005671
5672 /* And unlink it from device chain */
5673 err = -ENODEV;
5674 unlist_netdevice(dev);
5675
5676 synchronize_net();
5677
5678 /* Shutdown queueing discipline. */
5679 dev_shutdown(dev);
5680
 5681 /* Notify protocols that we are about to destroy
5682 this device. They should clean all the things.
5683 */
5684 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Eric W. Biedermana5ee1552009-11-29 15:45:58 +00005685 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005686
5687 /*
5688 * Flush the unicast and multicast chains
5689 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00005690 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005691 dev_addr_discard(dev);
5692
Eric W. Biederman38918452008-10-27 17:51:47 -07005693 netdev_unregister_kobject(dev);
5694
Eric W. Biedermance286d32007-09-12 13:53:49 +02005695 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005696 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005697
Eric W. Biedermance286d32007-09-12 13:53:49 +02005698 /* If there is an ifindex conflict assign a new one */
5699 if (__dev_get_by_index(net, dev->ifindex)) {
5700 int iflink = (dev->iflink == dev->ifindex);
5701 dev->ifindex = dev_new_index(net);
5702 if (iflink)
5703 dev->iflink = dev->ifindex;
5704 }
5705
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005706 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005707 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005708 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005709
5710 /* Add the device back in the hashes */
5711 list_netdevice(dev);
5712
 5713 /* Notify protocols that a new device appeared. */
5714 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5715
Eric W. Biedermand90a9092009-12-12 22:11:15 +00005716 /*
5717 * Prevent userspace races by waiting until the network
5718 * device is fully setup before sending notifications.
5719 */
5720 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5721
Eric W. Biedermance286d32007-09-12 13:53:49 +02005722 synchronize_net();
5723 err = 0;
5724out:
5725 return err;
5726}
Johannes Berg463d0182009-07-14 00:33:35 +02005727EXPORT_SYMBOL_GPL(dev_change_net_namespace);
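/*
 * Illustrative sketch (assumed caller, not from this file): moving a
 * device into another namespace under rtnl, with a "dev%d" fallback
 * pattern if the current name is taken there:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "dev%d");
 *	rtnl_unlock();
 *
 * A negative errno (e.g. -EINVAL for NETIF_F_NETNS_LOCAL devices) is
 * returned on failure.
 */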
Eric W. Biedermance286d32007-09-12 13:53:49 +02005728
Linus Torvalds1da177e2005-04-16 15:20:36 -07005729static int dev_cpu_callback(struct notifier_block *nfb,
5730 unsigned long action,
5731 void *ocpu)
5732{
5733 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005734 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005735 struct sk_buff *skb;
5736 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5737 struct softnet_data *sd, *oldsd;
5738
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005739 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005740 return NOTIFY_OK;
5741
5742 local_irq_disable();
5743 cpu = smp_processor_id();
5744 sd = &per_cpu(softnet_data, cpu);
5745 oldsd = &per_cpu(softnet_data, oldcpu);
5746
5747 /* Find end of our completion_queue. */
5748 list_skb = &sd->completion_queue;
5749 while (*list_skb)
5750 list_skb = &(*list_skb)->next;
5751 /* Append completion queue from offline CPU. */
5752 *list_skb = oldsd->completion_queue;
5753 oldsd->completion_queue = NULL;
5754
5755 /* Find end of our output_queue. */
5756 list_net = &sd->output_queue;
5757 while (*list_net)
5758 list_net = &(*list_net)->next_sched;
5759 /* Append output queue from offline CPU. */
5760 *list_net = oldsd->output_queue;
5761 oldsd->output_queue = NULL;
5762
5763 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5764 local_irq_enable();
5765
5766 /* Process offline CPU's input_pkt_queue */
5767 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5768 netif_rx(skb);
5769
5770 return NOTIFY_OK;
5771}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005772
5773
Herbert Xu7f353bf2007-08-10 15:47:58 -07005774/**
Herbert Xub63365a2008-10-23 01:11:29 -07005775 * netdev_increment_features - increment feature set by one
5776 * @all: current feature set
5777 * @one: new feature set
5778 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005779 *
5780 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005781 * @one to the master device with current feature set @all. Will not
5782 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005783 */
Herbert Xub63365a2008-10-23 01:11:29 -07005784unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5785 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005786{
Herbert Xub63365a2008-10-23 01:11:29 -07005787 /* If device needs checksumming, downgrade to it. */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005788 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
Herbert Xub63365a2008-10-23 01:11:29 -07005789 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5790 else if (mask & NETIF_F_ALL_CSUM) {
5791 /* If one device supports v4/v6 checksumming, set for all. */
5792 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5793 !(all & NETIF_F_GEN_CSUM)) {
5794 all &= ~NETIF_F_ALL_CSUM;
5795 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5796 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005797
Herbert Xub63365a2008-10-23 01:11:29 -07005798 /* If one device supports hw checksumming, set for all. */
5799 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5800 all &= ~NETIF_F_ALL_CSUM;
5801 all |= NETIF_F_HW_CSUM;
5802 }
5803 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005804
Herbert Xub63365a2008-10-23 01:11:29 -07005805 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005806
Herbert Xub63365a2008-10-23 01:11:29 -07005807 one |= all & NETIF_F_ONE_FOR_ALL;
Sridhar Samudralad9f59502009-10-07 12:24:25 +00005808 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
Herbert Xub63365a2008-10-23 01:11:29 -07005809 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005810
5811 return all;
5812}
Herbert Xub63365a2008-10-23 01:11:29 -07005813EXPORT_SYMBOL(netdev_increment_features);
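/*
 * Illustrative sketch (hedged; "master" and "slave" are hypothetical): a
 * bonding-style master could fold each slave's features into its own:
 *
 *	master->features = netdev_increment_features(master->features,
 *						     slave->features,
 *						     NETIF_F_ONE_FOR_ALL);
 *
 * Checksum capabilities degrade toward the least capable device, while
 * NETIF_F_ONE_FOR_ALL bits stay set if any one slave provides them.
 */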
Herbert Xu7f353bf2007-08-10 15:47:58 -07005814
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005815static struct hlist_head *netdev_create_hash(void)
5816{
5817 int i;
5818 struct hlist_head *hash;
5819
5820 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5821 if (hash != NULL)
5822 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5823 INIT_HLIST_HEAD(&hash[i]);
5824
5825 return hash;
5826}
5827
Eric W. Biederman881d9662007-09-17 11:56:21 -07005828/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005829static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005830{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005831 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005832
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005833 net->dev_name_head = netdev_create_hash();
5834 if (net->dev_name_head == NULL)
5835 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005836
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005837 net->dev_index_head = netdev_create_hash();
5838 if (net->dev_index_head == NULL)
5839 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005840
5841 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005842
5843err_idx:
5844 kfree(net->dev_name_head);
5845err_name:
5846 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005847}
5848
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005849/**
5850 * netdev_drivername - network driver for the device
5851 * @dev: network device
5852 * @buffer: buffer for resulting name
5853 * @len: size of buffer
5854 *
5855 * Determine network driver for device.
5856 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005857char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005858{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005859 const struct device_driver *driver;
5860 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005861
5862 if (len <= 0 || !buffer)
5863 return buffer;
5864 buffer[0] = 0;
5865
5866 parent = dev->dev.parent;
5867
5868 if (!parent)
5869 return buffer;
5870
5871 driver = parent->driver;
5872 if (driver && driver->name)
5873 strlcpy(buffer, driver->name, len);
5874 return buffer;
5875}
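/*
 * Illustrative sketch (assumed caller): pass a stack buffer and use the
 * returned pointer directly, e.g. in a watchdog-style message:
 *
 *	char drivername[64];
 *
 *	printk(KERN_INFO "driver: %s\n",
 *	       netdev_drivername(dev, drivername, sizeof(drivername)));
 */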
5876
Pavel Emelyanov46650792007-10-08 20:38:39 -07005877static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005878{
5879 kfree(net->dev_name_head);
5880 kfree(net->dev_index_head);
5881}
5882
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005883static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005884 .init = netdev_init,
5885 .exit = netdev_exit,
5886};
5887
Pavel Emelyanov46650792007-10-08 20:38:39 -07005888static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005889{
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005890 struct net_device *dev, *aux;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005891 /*
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005892 * Push all migratable network devices back to the
Eric W. Biedermance286d32007-09-12 13:53:49 +02005893 * initial network namespace
5894 */
5895 rtnl_lock();
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005896 for_each_netdev_safe(net, dev, aux) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005897 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005898 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005899
 5900 /* Ignore unmovable devices (e.g. loopback) */
5901 if (dev->features & NETIF_F_NETNS_LOCAL)
5902 continue;
5903
Eric W. Biedermane008b5f2009-11-29 22:25:30 +00005904 /* Leave virtual devices for the generic cleanup */
5905 if (dev->rtnl_link_ops)
5906 continue;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005907
Eric W. Biedermance286d32007-09-12 13:53:49 +02005908 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005909 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5910 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005911 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005912 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005913 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005914 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005915 }
5916 }
5917 rtnl_unlock();
5918}
5919
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00005920static void __net_exit default_device_exit_batch(struct list_head *net_list)
5921{
 5922 /* At exit all network devices must be removed from a network
 5923 * namespace. Do this in the reverse order of registration.
5924 * Do this across as many network namespaces as possible to
5925 * improve batching efficiency.
5926 */
5927 struct net_device *dev;
5928 struct net *net;
5929 LIST_HEAD(dev_kill_list);
5930
5931 rtnl_lock();
5932 list_for_each_entry(net, net_list, exit_list) {
5933 for_each_netdev_reverse(net, dev) {
5934 if (dev->rtnl_link_ops)
5935 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
5936 else
5937 unregister_netdevice_queue(dev, &dev_kill_list);
5938 }
5939 }
5940 unregister_netdevice_many(&dev_kill_list);
5941 rtnl_unlock();
5942}
5943
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005944static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005945 .exit = default_device_exit,
Eric W. Biederman04dc7f6b2009-12-03 02:29:04 +00005946 .exit_batch = default_device_exit_batch,
Eric W. Biedermance286d32007-09-12 13:53:49 +02005947};
5948
Linus Torvalds1da177e2005-04-16 15:20:36 -07005949/*
5950 * Initialize the DEV module. At boot time this walks the device list and
5951 * unhooks any devices that fail to initialise (normally hardware not
5952 * present) and leaves us with a valid list of present and active devices.
5953 *
5954 */
5955
5956/*
5957 * This is called single threaded during boot, so no need
5958 * to take the rtnl semaphore.
5959 */
5960static int __init net_dev_init(void)
5961{
5962 int i, rc = -ENOMEM;
5963
5964 BUG_ON(!dev_boot_phase);
5965
Linus Torvalds1da177e2005-04-16 15:20:36 -07005966 if (dev_proc_init())
5967 goto out;
5968
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005969 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005970 goto out;
5971
5972 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08005973 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005974 INIT_LIST_HEAD(&ptype_base[i]);
5975
Eric W. Biederman881d9662007-09-17 11:56:21 -07005976 if (register_pernet_subsys(&netdev_net_ops))
5977 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005978
5979 /*
5980 * Initialise the packet receive queues.
5981 */
5982
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005983 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005984 struct softnet_data *queue;
5985
5986 queue = &per_cpu(softnet_data, i);
5987 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005988 queue->completion_queue = NULL;
5989 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005990
5991 queue->backlog.poll = process_backlog;
5992 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005993 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005994 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005995 }
5996
Linus Torvalds1da177e2005-04-16 15:20:36 -07005997 dev_boot_phase = 0;
5998
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005999 /* The loopback device is special: if any other network device
 6000 * is present in a network namespace, the loopback device must
 6001 * be present. Since we now dynamically allocate and free the
 6002 * loopback device, ensure this invariant is maintained by
 6003 * keeping the loopback device as the first device on the
 6004 * list of network devices, ensuring the loopback device
6005 * is the first device that appears and the last network device
6006 * that disappears.
6007 */
6008 if (register_pernet_device(&loopback_net_ops))
6009 goto out;
6010
6011 if (register_pernet_device(&default_device_ops))
6012 goto out;
6013
Carlos R. Mafra962cf362008-05-15 11:15:37 -03006014 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6015 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006016
6017 hotcpu_notifier(dev_cpu_callback, 0);
6018 dst_init();
6019 dev_mcast_init();
6020 rc = 0;
6021out:
6022 return rc;
6023}
6024
6025subsys_initcall(net_dev_init);
6026
Krishna Kumare88721f2009-02-18 17:55:02 -08006027static int __init initialize_hashrnd(void)
6028{
6029 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
6030 return 0;
6031}
6032
6033late_initcall_sync(initialize_hashrnd);
6034