/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *              Thomas Bogendoerfer :   Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *              Michael Chastain :      Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell :    SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *      The list of packet types we will receive (as opposed to discard)
 *      and the routines to invoke.
 *
 *      Why 16. Because with 16 the only overlap we get on a hash of the
 *      low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *              0800    IP
 *              8100    802.1Q VLAN
 *              0001    802.3
 *              0002    AX.25
 *              0004    802.2
 *              8035    RARP
 *              0005    SNAP
 *              0805    X.25
 *              0806    ARP
 *              8137    IPX
 *              0009    Localtalk
 *              86DD    IPv6
 */

#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;        /* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

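/*
 * Usage sketch of the pure-reader pattern described above (hypothetical
 * example code; example_count_devices is not a kernel API): a reader that
 * cannot use RCU takes dev_base_lock for reading while walking the list.
 */
static inline int example_count_devices(struct net *net)
{
        struct net_device *dev;
        int count = 0;

        read_lock(&dev_base_lock);
        for_each_netdev(net, dev)
                count++;
        read_unlock(&dev_base_lock);
        return count;
}
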
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);
        return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
        hlist_del_rcu(&dev->name_hlist);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
         ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
         ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
         ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
         "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
         "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
         "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if protocol handler, mangling packet, will
 *      be the first on list, it is not able to sense, that packet
 *      is cloned and should be copied-on-write, so that it will
 *      change it and subsequent readers will get broken packet.
 *                                                      --ANK (980803)
 */

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep, therefore it can not guarantee that all
 *      CPUs which are in the middle of receiving packets will see the new
 *      packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        int hash;

        spin_lock_bh(&ptype_lock);
        if (pt->type == htons(ETH_P_ALL))
                list_add_rcu(&pt->list, &ptype_all);
        else {
                hash = ntohs(pt->type) & PTYPE_HASH_MASK;
                list_add_rcu(&pt->list, &ptype_base[hash]);
        }
        spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

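/*
 * Usage sketch (hypothetical example code; the example_tap names are not
 * kernel APIs): register a tap that sees every received frame.  ETH_P_ALL
 * handlers land on the ptype_all chain walked by the receive path.
 */
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *pt,
                           struct net_device *orig_dev)
{
        /* Taps receive shared clones; drop our reference when done. */
        kfree_skb(skb);
        return 0;
}

static struct packet_type example_tap __read_mostly = {
        .type = cpu_to_be16(ETH_P_ALL),
        .func = example_tap_rcv,
};

static inline void example_register_tap(void)
{
        dev_add_pack(&example_tap);
}
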
/**
 *      __dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head;
        struct packet_type *pt1;

        spin_lock_bh(&ptype_lock);

        if (pt->type == htons(ETH_P_ALL))
                head = &ptype_all;
        else
                head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
        spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *      dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

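/*
 * Usage sketch (hypothetical example code): the mirror of the tap
 * registration sketched earlier.  Because dev_remove_pack() sleeps in
 * synchronize_net(), it must run in process context, e.g. module exit.
 */
static inline void example_unregister_tap(void)
{
        dev_remove_pack(&example_tap);
        /* example_tap may be freed or reused from here on */
}
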
/******************************************************************************

                      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *      netdev_boot_setup_add - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine
 *      for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq = s[i].map.irq;
                        dev->base_addr = s[i].map.base_addr;
                        dev->mem_start = s[i].map.mem_start;
                        dev->mem_end = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 *      netdev_boot_base - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

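/*
 * Worked example of the "netdev=" syntax handled above: booting with
 *
 *      netdev=9,0x300,0,0,eth0
 *
 * stores irq 9 and base address 0x300 for "eth0"; a probing driver then
 * picks the values up through netdev_boot_setup_check().
 */
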
/*******************************************************************************

                            Device Interface Subroutines

*******************************************************************************/

/**
 *      __dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *      dev_get_by_name_rcu - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name.
 *      If the name is found a pointer to the device is returned.
 *      If the name is not found then %NULL is returned.
 *      The reference counters are not incremented so the caller must be
 *      careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_name_hash(net, name);

        hlist_for_each_entry_rcu(dev, p, head, name_hlist)
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *      dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

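/*
 * Usage sketch (hypothetical example code): every successful
 * dev_get_by_name() must be paired with dev_put().
 */
static inline int example_is_up(struct net *net, const char *name)
{
        struct net_device *dev = dev_get_by_name(net, name);
        int up = 0;

        if (dev) {
                up = !!(dev->flags & IFF_UP);
                dev_put(dev);
        }
        return up;
}
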
/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *      dev_get_by_index_rcu - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
        struct hlist_node *p;
        struct net_device *dev;
        struct hlist_head *head = dev_index_hash(net, ifindex);

        hlist_for_each_entry_rcu(dev, p, head, index_hlist)
                if (dev->ifindex == ifindex)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

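/*
 * Usage sketch (hypothetical example code): the result of an RCU lookup
 * is only valid inside the read-side critical section unless dev_hold()
 * is called on it.
 */
static inline int example_ifindex_to_mtu(struct net *net, int ifindex)
{
        struct net_device *dev;
        int mtu = -ENODEV;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                mtu = dev->mtu;
        rcu_read_unlock();
        return mtu;
}
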
/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns NULL if the device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                dev_hold(dev);
        rcu_read_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *      dev_getbyhwaddr - find a device by its hardware address
 *      @net: the applicable net namespace
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns NULL if the device
 *      is not found or a pointer to the device. The caller must hold the
 *      rtnl semaphore. The returned device has not had its ref count increased
 *      and the caller must therefore be careful about locking.
 *
 *      BUGS:
 *      If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
        struct net_device *dev;

        ASSERT_RTNL();

        for_each_netdev(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

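/*
 * Usage sketch (hypothetical example code): check whether an Ethernet
 * MAC address is already taken.  The caller must hold RTNL, and the
 * answer is only stable for as long as it keeps holding it.
 */
static inline bool example_mac_in_use(struct net *net, char *mac)
{
        ASSERT_RTNL();
        return dev_getbyhwaddr(net, ARPHRD_ETHER, mac) != NULL;
}
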
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        rtnl_lock();
        dev = __dev_getfirstbyhwtype(net, type);
        if (dev)
                dev_hold(dev);
        rtnl_unlock();
        return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *      dev_get_by_flags - find any device with given flags
 *      @net: the applicable net namespace
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns NULL if a device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
                                    unsigned short mask)
{
        struct net_device *dev, *ret;

        ret = NULL;
        rcu_read_lock();
        for_each_netdev_rcu(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);

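/*
 * Usage sketch (hypothetical example code): find any loopback device.
 * As with the other reference-taking getters, a non-NULL result must
 * eventually be released with dev_put().
 */
static inline struct net_device *example_find_loopback(struct net *net)
{
        return dev_get_by_flags(net, IFF_LOOPBACK, IFF_LOOPBACK);
}
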
/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names to
 *      allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
int dev_valid_name(const char *name)
{
        if (*name == '\0')
                return 0;
        if (strlen(name) >= IFNAMSIZ)
                return 0;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return 0;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return 0;
                name++;
        }
        return 1;
}
EXPORT_SYMBOL(dev_valid_name);

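/*
 * Examples of the rules above: "eth0", "wlan%d" and "a.b" are accepted;
 * "", ".", "..", "eth/0", "eth 0" and anything of IFNAMSIZ or more
 * characters are rejected.
 */
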
/**
 *      __dev_alloc_name - allocate a name for a device
 *      @net: network namespace to allocate the device name in
 *      @name: name format string
 *      @buf:  scratch buffer and result name string
 *
 *      Passed a format string - eg "lt%d" - it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string - eg "lt%d" - it will try and find a suitable
 *      id. It scans list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

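/*
 * Usage sketch (hypothetical example code): claim the first free
 * "dummy%d" slot for a freshly allocated, not yet registered device.
 */
static inline int example_name_device(struct net_device *dev)
{
        int unit = dev_alloc_name(dev, "dummy%d");

        if (unit < 0)
                return unit;    /* negative errno, e.g. -EINVAL or -ENFILE */
        /* dev->name now holds e.g. "dummy0"; unit is the number chosen */
        return 0;
}
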
/**
 *      dev_change_name - change name of a device
 *      @dev: device
 *      @newname: name (or format string) must be at least IFNAMSIZ
 *
 *      Change the name of a device; a format string such as "eth%d"
 *      can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        if (!dev_valid_name(newname))
                return -EINVAL;

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
                return 0;

        memcpy(oldname, dev->name, IFNAMSIZ);

        if (strchr(newname, '%')) {
                err = dev_alloc_name(dev, newname);
                if (err < 0)
                        return err;
        } else if (__dev_get_by_name(net, newname))
                return -EEXIST;
        else
                strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
        /* For now only devices in the initial network namespace
         * are in sysfs.
         */
        if (net == &init_net) {
                ret = device_rename(&dev->dev, dev->name);
                if (ret) {
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        return ret;
                }
        }

        write_lock_bh(&dev_base_lock);
        hlist_del(&dev->name_hlist);
        write_unlock_bh(&dev_base_lock);

        synchronize_rcu();

        write_lock_bh(&dev_base_lock);
        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                /* err >= 0 after dev_alloc_name() or stores the first errno */
                if (err >= 0) {
                        err = ret;
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                } else {
                        printk(KERN_ERR
                               "%s: name change rollback failed: %d.\n",
                               dev->name, ret);
                }
        }

        return err;
}

/**
 *      dev_set_alias - change ifalias of a device
 *      @dev: device
 *      @alias: name up to IFALIASZ
 *      @len: limit of bytes to copy from info
 *
 *      Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        ASSERT_RTNL();

        if (len >= IFALIASZ)
                return -EINVAL;

        if (!len) {
                if (dev->ifalias) {
                        kfree(dev->ifalias);
                        dev->ifalias = NULL;
                }
                return 0;
        }

        dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
        if (!dev->ifalias)
                return -ENOMEM;

        strlcpy(dev->ifalias, alias, len+1);
        return len;
}

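/*
 * Usage sketch (hypothetical example code): set a human-readable
 * description under RTNL, much as the rtnetlink IFLA_IFALIAS path does.
 */
static inline int example_describe(struct net_device *dev, const char *desc)
{
        int ret;

        rtnl_lock();
        ret = dev_set_alias(dev, desc, strlen(desc));
        rtnl_unlock();
        return ret < 0 ? ret : 0;       /* success returns the length */
}
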
/**
 *      netdev_features_change - device changes features
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *      netdev_state_change - device changes state
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed state. This function calls
 *      the notifier chains for netdev_chain and sends a NEWLINK message
 *      to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                call_netdevice_notifiers(NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
        }
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
        call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *      dev_load - load a network module
 *      @net: the applicable net namespace
 *      @name: name of interface
 *
 *      If a network interface is not present and the process has suitable
 *      privileges this function loads the module. If module loading is not
 *      available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_name_rcu(net, name);
        rcu_read_unlock();

        if (!dev && capable(CAP_NET_ADMIN))
                request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

/**
 *      dev_open        - prepare an interface for use.
 *      @dev: device to open
 *
 *      Takes a device from down to up state. The device's private open
 *      function is invoked and then the multicast lists are loaded. Finally
 *      the device is moved into the up state and a %NETDEV_UP message is
 *      sent to the netdev notifier chain.
 *
 *      Calling this function on an active interface is a nop. On a failure
 *      a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int ret;

        ASSERT_RTNL();

        /*
         *      Is it already up?
         */

        if (dev->flags & IFF_UP)
                return 0;

        /*
         *      Is it even present?
         */
        if (!netif_device_present(dev))
                return -ENODEV;

        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
        ret = notifier_to_errno(ret);
        if (ret)
                return ret;

        /*
         *      Call device private open method
         */
        set_bit(__LINK_STATE_START, &dev->state);

        if (ops->ndo_validate_addr)
                ret = ops->ndo_validate_addr(dev);

        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);

        /*
         *      If it went open OK then:
         */

        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
                /*
                 *      Set the flags.
                 */
                dev->flags |= IFF_UP;

                /*
                 *      Enable NET_DMA
                 */
                net_dmaengine_get();

                /*
                 *      Initialize multicasting status
                 */
                dev_set_rx_mode(dev);

                /*
                 *      Wakeup transmit queue engine
                 */
                dev_activate(dev);

                /*
                 *      ... and announce new interface.
                 */
                call_netdevice_notifiers(NETDEV_UP, dev);
        }

        return ret;
}
EXPORT_SYMBOL(dev_open);

1193 * dev_close - shutdown an interface.
1194 * @dev: device to shutdown
1195 *
1196 * This function moves an active device into down state. A
1197 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1198 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1199 * chain.
1200 */
1201int dev_close(struct net_device *dev)
1202{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001203 const struct net_device_ops *ops = dev->netdev_ops;
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001204 ASSERT_RTNL();
1205
David S. Miller9d5010d2007-09-12 14:33:25 +02001206 might_sleep();
1207
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 if (!(dev->flags & IFF_UP))
1209 return 0;
1210
1211 /*
1212 * Tell people we are going down, so that they can
1213 * prepare to death, when device is still operating.
1214 */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001215 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 clear_bit(__LINK_STATE_START, &dev->state);
1218
1219 /* Synchronize to scheduled poll. We cannot touch poll list,
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001220 * it can be even on different cpu. So just clear netif_running().
1221 *
1222 * dev->stop() will invoke napi_disable() on all of it's
1223 * napi_struct instances on this device.
1224 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225 smp_mb__after_clear_bit(); /* Commit netif_running(). */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001227 dev_deactivate(dev);
1228
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 /*
1230 * Call the device specific close. This cannot fail.
1231 * Only if device is UP
1232 *
1233 * We allow it to be called even after a DETACH hot-plug
1234 * event.
1235 */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001236 if (ops->ndo_stop)
1237 ops->ndo_stop(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238
1239 /*
1240 * Device is now down.
1241 */
1242
1243 dev->flags &= ~IFF_UP;
1244
1245 /*
1246 * Tell people we are down
1247 */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001248 call_netdevice_notifiers(NETDEV_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249
Dan Williams649274d2009-01-11 00:20:39 -08001250 /*
1251 * Shutdown NET_DMA
1252 */
David S. Millerb4bd07c2009-02-06 22:06:43 -08001253 net_dmaengine_put();
Dan Williams649274d2009-01-11 00:20:39 -08001254
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 return 0;
1256}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001257EXPORT_SYMBOL(dev_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258
1259
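/*
 * Usage sketch (hypothetical example code): the mirror of the bring-up
 * sketch above.  dev_close() may sleep, so it too needs process context
 * in addition to the RTNL semaphore.
 */
static inline void example_bring_down(struct net_device *dev)
{
        rtnl_lock();
        dev_close(dev);
        rtnl_unlock();
}
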
/**
 *      dev_disable_lro - disable Large Receive Offload on a device
 *      @dev: device
 *
 *      Disable Large Receive Offload (LRO) on a net device.  Must be
 *      called under RTNL.  This is needed if received packets may be
 *      forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
        if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
            dev->ethtool_ops->set_flags) {
                u32 flags = dev->ethtool_ops->get_flags(dev);
                if (flags & ETH_FLAG_LRO) {
                        flags &= ~ETH_FLAG_LRO;
                        dev->ethtool_ops->set_flags(dev, flags);
                }
        }
        WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);

static int dev_boot_phase = 1;

/*
 * Device change register/unregister. These are not inline or static
 * as we export them to the world.
 */

/**
 *      register_netdevice_notifier - register a network notifier block
 *      @nb: notifier
 *
 *      Register a notifier to be called when network device events occur.
 *      The notifier passed is linked into the kernel structures and must
 *      not be reused until it has been unregistered. A negative errno code
 *      is returned on a failure.
 *
 *      When registered all registration and up events are replayed
 *      to the new notifier to allow it to have a race free
 *      view of the network device list.
 */

1304int register_netdevice_notifier(struct notifier_block *nb)
1305{
1306 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001307 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001308 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 int err;
1310
1311 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001312 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001313 if (err)
1314 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001315 if (dev_boot_phase)
1316 goto unlock;
1317 for_each_net(net) {
1318 for_each_netdev(net, dev) {
1319 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1320 err = notifier_to_errno(err);
1321 if (err)
1322 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323
Eric W. Biederman881d9662007-09-17 11:56:21 -07001324 if (!(dev->flags & IFF_UP))
1325 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001326
Eric W. Biederman881d9662007-09-17 11:56:21 -07001327 nb->notifier_call(nb, NETDEV_UP, dev);
1328 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001330
1331unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 rtnl_unlock();
1333 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001334
1335rollback:
1336 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001337 for_each_net(net) {
1338 for_each_netdev(net, dev) {
1339 if (dev == last)
1340 break;
Herbert Xufcc5a032007-07-30 17:03:38 -07001341
Eric W. Biederman881d9662007-09-17 11:56:21 -07001342 if (dev->flags & IFF_UP) {
1343 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1344 nb->notifier_call(nb, NETDEV_DOWN, dev);
1345 }
1346 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Octavian Purdila395264d2009-11-16 13:49:35 +00001347 nb->notifier_call(nb, NETDEV_UNREGISTER_PERNET, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001348 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001349 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001350
1351 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001352 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001354EXPORT_SYMBOL(register_netdevice_notifier);
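
/*
 * Illustrative sketch (not part of this file): a minimal notifier as a
 * caller might register it. Since registration replays NETDEV_REGISTER
 * and NETDEV_UP for devices that already exist, the callback must cope
 * with events for devices it has never seen before.
 */
static int example_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* notifiers receive the device */

	if (event == NETDEV_UP)
		printk(KERN_INFO "example: %s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* At module init: register_netdevice_notifier(&example_netdev_notifier);
 * at module exit: unregister_netdevice_notifier(&example_netdev_notifier);
 */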
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355
1356/**
1357 * unregister_netdevice_notifier - unregister a network notifier block
1358 * @nb: notifier
1359 *
1360 * Unregister a notifier previously registered by
1361 * register_netdevice_notifier(). The notifier is unlinked from the
1362 * kernel structures and may then be reused. A negative errno code
1363 * is returned on a failure.
1364 */
1365
1366int unregister_netdevice_notifier(struct notifier_block *nb)
1367{
Herbert Xu9f514952006-03-25 01:24:25 -08001368 int err;
1369
1370 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001371 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001372 rtnl_unlock();
1373 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001375EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376
1377/**
1378 * call_netdevice_notifiers - call all network notifier blocks
1379 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001380 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 *
1382 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001383 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 */
1385
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001386int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001388 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389}
1390
1391/* When > 0 there are consumers of rx skb time stamps */
1392static atomic_t netstamp_needed = ATOMIC_INIT(0);
1393
1394void net_enable_timestamp(void)
1395{
1396 atomic_inc(&netstamp_needed);
1397}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001398EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399
1400void net_disable_timestamp(void)
1401{
1402 atomic_dec(&netstamp_needed);
1403}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001404EXPORT_SYMBOL(net_disable_timestamp);
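
/*
 * Illustrative sketch (not part of this file): a hypothetical capture
 * facility keeping the timestamp refcount balanced. While the count is
 * nonzero, net_timestamp() below stamps every received skb.
 */
static void example_capture_start(void)
{
	net_enable_timestamp();		/* take a reference */
}

static void example_capture_stop(void)
{
	net_disable_timestamp();	/* drop it again */
}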
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001406static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407{
1408 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001409 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001410 else
1411 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412}
1413
1414/*
1415 * Support routine. Sends outgoing frames to any network
1416 * taps currently in use.
1417 */
1418
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001419static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420{
1421 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001422
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001423#ifdef CONFIG_NET_CLS_ACT
1424 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1425 net_timestamp(skb);
1426#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001427 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001428#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
1430 rcu_read_lock();
1431 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1432 /* Never send packets back to the socket
1433 * they originated from - MvS (miquels@drinkel.ow.org)
1434 */
1435 if ((ptype->dev == dev || !ptype->dev) &&
1436 (ptype->af_packet_priv == NULL ||
1437 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001438 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 if (!skb2)
1440 break;
1441
1442 /* The network header should be correctly
1443 set by the sender, so that the check below is
1444 just protection against buggy protocols.
1445 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001446 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001448 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001449 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 if (net_ratelimit())
1451 printk(KERN_CRIT "protocol %04x is "
1452 "buggy, dev %s\n",
1453 ntohs(skb2->protocol), dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001454 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 }
1456
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001457 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001459 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 }
1461 }
1462 rcu_read_unlock();
1463}
1464
Denis Vlasenko56079432006-03-29 15:57:29 -08001465
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001466static inline void __netif_reschedule(struct Qdisc *q)
1467{
1468 struct softnet_data *sd;
1469 unsigned long flags;
1470
1471 local_irq_save(flags);
1472 sd = &__get_cpu_var(softnet_data);
1473 q->next_sched = sd->output_queue;
1474 sd->output_queue = q;
1475 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1476 local_irq_restore(flags);
1477}
1478
David S. Miller37437bb2008-07-16 02:15:04 -07001479void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001480{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001481 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1482 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001483}
1484EXPORT_SYMBOL(__netif_schedule);
1485
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001486void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001487{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001488 if (atomic_dec_and_test(&skb->users)) {
1489 struct softnet_data *sd;
1490 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001491
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001492 local_irq_save(flags);
1493 sd = &__get_cpu_var(softnet_data);
1494 skb->next = sd->completion_queue;
1495 sd->completion_queue = skb;
1496 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1497 local_irq_restore(flags);
1498 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001499}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001500EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001501
1502void dev_kfree_skb_any(struct sk_buff *skb)
1503{
1504 if (in_irq() || irqs_disabled())
1505 dev_kfree_skb_irq(skb);
1506 else
1507 dev_kfree_skb(skb);
1508}
1509EXPORT_SYMBOL(dev_kfree_skb_any);
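
/*
 * Illustrative sketch (not part of this file): a driver TX-completion
 * handler that may be invoked from hardirq or process context, and so
 * lets dev_kfree_skb_any() pick the right way to free the skb.
 */
static void example_tx_complete(struct sk_buff *skb)
{
	/* Defers to the completion queue when IRQs are off,
	 * frees directly otherwise.
	 */
	dev_kfree_skb_any(skb);
}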
1510
1511
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001512/**
1513 * netif_device_detach - mark device as removed
1514 * @dev: network device
1515 *
1516 * Mark the device as removed from the system and therefore no longer available.
1517 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001518void netif_device_detach(struct net_device *dev)
1519{
1520 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1521 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001522 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001523 }
1524}
1525EXPORT_SYMBOL(netif_device_detach);
1526
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001527/**
1528 * netif_device_attach - mark device as attached
1529 * @dev: network device
1530 *
1531 * Mark device as attached from system and restart if needed.
1532 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001533void netif_device_attach(struct net_device *dev)
1534{
1535 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1536 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001537 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001538 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001539 }
1540}
1541EXPORT_SYMBOL(netif_device_attach);
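
/*
 * Illustrative sketch (not part of this file): the usual detach/attach
 * pairing in a driver's suspend and resume paths. example_hw_power_down()
 * and example_hw_power_up() are hypothetical stand-ins for the driver's
 * hardware routines.
 */
static void example_hw_power_down(struct net_device *dev);
static void example_hw_power_up(struct net_device *dev);

static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stops all TX queues if running */
	example_hw_power_down(dev);
	return 0;
}

static int example_resume(struct net_device *dev)
{
	example_hw_power_up(dev);
	netif_device_attach(dev);	/* wakes queues, rearms watchdog */
	return 0;
}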
1542
Ben Hutchings6de329e2008-06-16 17:02:28 -07001543static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1544{
1545 return ((features & NETIF_F_GEN_CSUM) ||
1546 ((features & NETIF_F_IP_CSUM) &&
1547 protocol == htons(ETH_P_IP)) ||
1548 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001549 protocol == htons(ETH_P_IPV6)) ||
1550 ((features & NETIF_F_FCOE_CRC) &&
1551 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001552}
1553
1554static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1555{
1556 if (can_checksum_protocol(dev->features, skb->protocol))
1557 return true;
1558
1559 if (skb->protocol == htons(ETH_P_8021Q)) {
1560 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1561 if (can_checksum_protocol(dev->features & dev->vlan_features,
1562 veh->h_vlan_encapsulated_proto))
1563 return true;
1564 }
1565
1566 return false;
1567}
Denis Vlasenko56079432006-03-29 15:57:29 -08001568
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569/*
1570 * Invalidate hardware checksum when packet is to be mangled, and
1571 * complete checksum manually on outgoing path.
1572 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001573int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574{
Al Virod3bc23e2006-11-14 21:24:49 -08001575 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001576 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577
Patrick McHardy84fa7932006-08-29 16:44:56 -07001578 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001579 goto out_set_summed;
1580
1581 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001582 /* Let GSO fix up the checksum. */
1583 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 }
1585
Herbert Xua0308472007-10-15 01:47:15 -07001586 offset = skb->csum_start - skb_headroom(skb);
1587 BUG_ON(offset >= skb_headlen(skb));
1588 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1589
1590 offset += skb->csum_offset;
1591 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1592
1593 if (skb_cloned(skb) &&
1594 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1596 if (ret)
1597 goto out;
1598 }
1599
Herbert Xua0308472007-10-15 01:47:15 -07001600 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001601out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001603out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 return ret;
1605}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001606EXPORT_SYMBOL(skb_checksum_help);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001608/**
1609 * skb_gso_segment - Perform segmentation on skb.
1610 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001611 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001612 *
1613 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001614 *
1615 * It may return NULL if the skb requires no segmentation. This is
1616 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001617 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001618struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001619{
1620 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1621 struct packet_type *ptype;
Al Viro252e33462006-11-14 20:48:11 -08001622 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001623 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001624
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001625 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001626 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001627 __skb_pull(skb, skb->mac_len);
1628
Herbert Xu67fd1a72009-01-19 16:26:44 -08001629 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1630 struct net_device *dev = skb->dev;
1631 struct ethtool_drvinfo info = {};
1632
1633 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1634 dev->ethtool_ops->get_drvinfo(dev, &info);
1635
1636 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1637 "ip_summed=%d",
1638 info.driver, dev ? dev->features : 0L,
1639 skb->sk ? skb->sk->sk_route_caps : 0L,
1640 skb->len, skb->data_len, skb->ip_summed);
1641
Herbert Xua430a432006-07-08 13:34:56 -07001642 if (skb_header_cloned(skb) &&
1643 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1644 return ERR_PTR(err);
1645 }
1646
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001647 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001648 list_for_each_entry_rcu(ptype,
1649 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001650 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001651 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001652 err = ptype->gso_send_check(skb);
1653 segs = ERR_PTR(err);
1654 if (err || skb_gso_ok(skb, features))
1655 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001656 __skb_push(skb, (skb->data -
1657 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001658 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001659 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001660 break;
1661 }
1662 }
1663 rcu_read_unlock();
1664
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001665 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001666
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001667 return segs;
1668}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001669EXPORT_SYMBOL(skb_gso_segment);
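
/*
 * Illustrative sketch (not part of this file): consuming the list that
 * skb_gso_segment() returns. dev_gso_segment() below does the real
 * equivalent of this; the per-segment transmit step is elided here.
 */
static int example_segment_and_walk(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = skb_gso_segment(skb, features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)
		return 0;	/* header verification only */

	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		/* ... hand nskb to the device here ... */
	}
	return 0;
}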
1670
Herbert Xufb286bb2005-11-10 13:01:24 -08001671/* Take action when hardware reception checksum errors are detected. */
1672#ifdef CONFIG_BUG
1673void netdev_rx_csum_fault(struct net_device *dev)
1674{
1675 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001676 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001677 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001678 dump_stack();
1679 }
1680}
1681EXPORT_SYMBOL(netdev_rx_csum_fault);
1682#endif
1683
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684/* Actually, we should eliminate this check as soon as we know that:
1685 * 1. An IOMMU is present and can map all of the memory.
1686 * 2. No high memory really exists on this machine.
1687 */
1688
1689static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1690{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001691#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 int i;
1693
1694 if (dev->features & NETIF_F_HIGHDMA)
1695 return 0;
1696
1697 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1698 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1699 return 1;
1700
Herbert Xu3d3a8532006-06-27 13:33:10 -07001701#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 return 0;
1703}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001705struct dev_gso_cb {
1706 void (*destructor)(struct sk_buff *skb);
1707};
1708
1709#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1710
1711static void dev_gso_skb_destructor(struct sk_buff *skb)
1712{
1713 struct dev_gso_cb *cb;
1714
1715 do {
1716 struct sk_buff *nskb = skb->next;
1717
1718 skb->next = nskb->next;
1719 nskb->next = NULL;
1720 kfree_skb(nskb);
1721 } while (skb->next);
1722
1723 cb = DEV_GSO_CB(skb);
1724 if (cb->destructor)
1725 cb->destructor(skb);
1726}
1727
1728/**
1729 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1730 * @skb: buffer to segment
1731 *
1732 * This function segments the given skb and stores the list of segments
1733 * in skb->next.
1734 */
1735static int dev_gso_segment(struct sk_buff *skb)
1736{
1737 struct net_device *dev = skb->dev;
1738 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001739 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1740 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001741
Herbert Xu576a30e2006-06-27 13:22:38 -07001742 segs = skb_gso_segment(skb, features);
1743
1744 /* Verifying header integrity only. */
1745 if (!segs)
1746 return 0;
1747
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001748 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001749 return PTR_ERR(segs);
1750
1751 skb->next = segs;
1752 DEV_GSO_CB(skb)->destructor = skb->destructor;
1753 skb->destructor = dev_gso_skb_destructor;
1754
1755 return 0;
1756}
1757
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001758int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1759 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001760{
Stephen Hemminger00829822008-11-20 20:14:53 -08001761 const struct net_device_ops *ops = dev->netdev_ops;
Patrick McHardy572a9d72009-11-10 06:14:14 +00001762 int rc = NETDEV_TX_OK;
Stephen Hemminger00829822008-11-20 20:14:53 -08001763
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001764 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001765 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001766 dev_queue_xmit_nit(skb, dev);
1767
Herbert Xu576a30e2006-06-27 13:22:38 -07001768 if (netif_needs_gso(dev, skb)) {
1769 if (unlikely(dev_gso_segment(skb)))
1770 goto out_kfree_skb;
1771 if (skb->next)
1772 goto gso;
1773 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001774
Eric Dumazet93f154b2009-05-18 22:19:19 -07001775 /*
1776 * If the device doesn't need skb->dst, release it right now while
1777 * it's still hot in this CPU's cache.
1778 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001779 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1780 skb_dst_drop(skb);
1781
Patrick Ohlyac45f602009-02-12 05:03:37 +00001782 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001783 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001784 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001785 /*
1786 * TODO: if skb_orphan() was called by
1787 * dev->hard_start_xmit() (for example, the unmodified
1788 * igb driver does that; bnx2 doesn't), then
1789 * skb_tx_software_timestamp() will be unable to send
1790 * back the time stamp.
1791 *
1792 * How can this be prevented? Always create another
1793 * reference to the socket before calling
1794 * dev->hard_start_xmit()? Prevent that skb_orphan()
1795 * does anything in dev->hard_start_xmit() by clearing
1796 * the skb destructor before the call and restoring it
1797 * afterwards, then doing the skb_orphan() ourselves?
1798 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001799 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001800 }
1801
Herbert Xu576a30e2006-06-27 13:22:38 -07001802gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001803 do {
1804 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001805
1806 skb->next = nskb->next;
1807 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001808 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001809 if (unlikely(rc != NETDEV_TX_OK)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00001810 if (rc & ~NETDEV_TX_MASK)
1811 goto out_kfree_gso_skb;
Michael Chanf54d9e82006-06-25 23:57:04 -07001812 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001813 skb->next = nskb;
1814 return rc;
1815 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001816 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001817 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001818 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001819 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001820
Patrick McHardy572a9d72009-11-10 06:14:14 +00001821out_kfree_gso_skb:
1822 if (likely(skb->next == NULL))
1823 skb->destructor = DEV_GSO_CB(skb)->destructor;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001824out_kfree_skb:
1825 kfree_skb(skb);
Patrick McHardy572a9d72009-11-10 06:14:14 +00001826 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001827}
1828
David S. Miller70192982009-01-27 16:34:47 -08001829static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001830
Stephen Hemminger92477442009-03-21 13:39:26 -07001831u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001832{
David S. Miller70192982009-01-27 16:34:47 -08001833 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001834
David S. Miller513de112009-05-03 14:43:10 -07001835 if (skb_rx_queue_recorded(skb)) {
1836 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001837 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001838 hash -= dev->real_num_tx_queues;
1839 return hash;
1840 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001841
1842 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001843 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001844 else
David S. Miller70192982009-01-27 16:34:47 -08001845 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001846
David S. Miller70192982009-01-27 16:34:47 -08001847 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001848
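	/* Scale the 32-bit hash onto [0, real_num_tx_queues) without a
	 * modulus: treat hash as a fraction of 2^32, multiply it by the
	 * queue count and keep the integer part.
	 */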
David S. Millerb6b2fed2008-07-21 09:48:06 -07001849 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001850}
Stephen Hemminger92477442009-03-21 13:39:26 -07001851EXPORT_SYMBOL(skb_tx_hash);
David S. Miller8f0f2222008-07-15 03:47:03 -07001852
Eric Dumazeted046422009-11-13 21:54:04 +00001853static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1854{
1855 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1856 if (net_ratelimit()) {
1857 WARN(1, "%s selects TX queue %d, but "
1858 "real number of TX queues is %d\n",
1859 dev->name, queue_index,
1860 dev->real_num_tx_queues);
1861 }
1862 return 0;
1863 }
1864 return queue_index;
1865}
1866
David S. Millere8a04642008-07-17 00:34:19 -07001867static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1868 struct sk_buff *skb)
1869{
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001870 u16 queue_index;
1871 struct sock *sk = skb->sk;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001872
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001873 if (sk_tx_queue_recorded(sk)) {
1874 queue_index = sk_tx_queue_get(sk);
1875 } else {
1876 const struct net_device_ops *ops = dev->netdev_ops;
1877
1878 if (ops->ndo_select_queue) {
1879 queue_index = ops->ndo_select_queue(dev, skb);
Eric Dumazeted046422009-11-13 21:54:04 +00001880 queue_index = dev_cap_txqueue(dev, queue_index);
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001881 } else {
1882 queue_index = 0;
1883 if (dev->real_num_tx_queues > 1)
1884 queue_index = skb_tx_hash(dev, skb);
1885
1886 if (sk && sk->sk_dst_cache)
1887 sk_tx_queue_set(sk, queue_index);
1888 }
1889 }
David S. Millereae792b2008-07-15 03:03:33 -07001890
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001891 skb_set_queue_mapping(skb, queue_index);
1892 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001893}
1894
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001895static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1896 struct net_device *dev,
1897 struct netdev_queue *txq)
1898{
1899 spinlock_t *root_lock = qdisc_lock(q);
1900 int rc;
1901
1902 spin_lock(root_lock);
1903 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1904 kfree_skb(skb);
1905 rc = NET_XMIT_DROP;
1906 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1907 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1908 /*
1909 * This is a work-conserving queue; there are no old skbs
1910 * waiting to be sent out; and the qdisc is not running -
1911 * xmit the skb directly.
1912 */
1913 __qdisc_update_bstats(q, skb->len);
1914 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1915 __qdisc_run(q);
1916 else
1917 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1918
1919 rc = NET_XMIT_SUCCESS;
1920 } else {
1921 rc = qdisc_enqueue_root(skb, q);
1922 qdisc_run(q);
1923 }
1924 spin_unlock(root_lock);
1925
1926 return rc;
1927}
1928
Dave Jonesd29f7492008-07-22 14:09:06 -07001929/**
1930 * dev_queue_xmit - transmit a buffer
1931 * @skb: buffer to transmit
1932 *
1933 * Queue a buffer for transmission to a network device. The caller must
1934 * have set the device and priority and built the buffer before calling
1935 * this function. The function can be called from an interrupt.
1936 *
1937 * A negative errno code is returned on a failure. A success does not
1938 * guarantee the frame will be transmitted as it may be dropped due
1939 * to congestion or traffic shaping.
1940 *
1941 * -----------------------------------------------------------------------------------
1942 * I notice this method can also return errors from the queue disciplines,
1943 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1944 * be positive.
1945 *
1946 * Regardless of the return value, the skb is consumed, so it is currently
1947 * difficult to retry a send to this method. (You can bump the ref count
1948 * before sending to hold a reference for retry if you are careful.)
1949 *
1950 * When calling this method, interrupts MUST be enabled. This is because
1951 * the BH enable code must have IRQs enabled so that it will not deadlock.
1952 * --BLG
1953 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954int dev_queue_xmit(struct sk_buff *skb)
1955{
1956 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001957 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 struct Qdisc *q;
1959 int rc = -ENOMEM;
1960
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001961 /* GSO will handle the following emulations directly. */
1962 if (netif_needs_gso(dev, skb))
1963 goto gso;
1964
David S. Miller4cf704f2009-06-09 00:18:51 -07001965 if (skb_has_frags(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001967 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 goto out_kfree_skb;
1969
1970 /* Fragmented skb is linearized if device does not support SG,
1971 * or if at least one of fragments is in highmem and device
1972 * does not support DMA from it.
1973 */
1974 if (skb_shinfo(skb)->nr_frags &&
1975 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001976 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 goto out_kfree_skb;
1978
1979 /* If packet is not checksummed and device does not support
1980 * checksumming for this protocol, complete checksumming here.
1981 */
Herbert Xu663ead32007-04-09 11:59:07 -07001982 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1983 skb_set_transport_header(skb, skb->csum_start -
1984 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001985 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1986 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001987 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001989gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001990 /* Disable soft irqs for various locks below. Also
1991 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001993 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994
David S. Millereae792b2008-07-15 03:03:33 -07001995 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001996 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001997
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001999 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000#endif
2001 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00002002 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07002003 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 }
2005
2006 /* The device has no queue. Common case for software devices:
2007 loopback, all sorts of tunnels...
2008
Herbert Xu932ff272006-06-09 12:20:56 -07002009 Really, it is unlikely that netif_tx_lock protection is necessary
2010 here. (E.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 counters.)
2012 However, it is possible that they rely on the protection
2013 we provide here.
2014
2015 Check this and take the lock. It is not prone to deadlocks.
2016 Alternatively, shoot the noqueue qdisc; that is even simpler 8)
2017 */
2018 if (dev->flags & IFF_UP) {
2019 int cpu = smp_processor_id(); /* ok because BHs are off */
2020
David S. Millerc773e842008-07-08 23:13:53 -07002021 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022
David S. Millerc773e842008-07-08 23:13:53 -07002023 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002025 if (!netif_tx_queue_stopped(txq)) {
Patrick McHardy572a9d72009-11-10 06:14:14 +00002026 rc = dev_hard_start_xmit(skb, dev, txq);
2027 if (dev_xmit_complete(rc)) {
David S. Millerc773e842008-07-08 23:13:53 -07002028 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 goto out;
2030 }
2031 }
David S. Millerc773e842008-07-08 23:13:53 -07002032 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 if (net_ratelimit())
2034 printk(KERN_CRIT "Virtual device %s asks to "
2035 "queue packet!\n", dev->name);
2036 } else {
2037 /* Recursion is detected! It is possible,
2038 * unfortunately */
2039 if (net_ratelimit())
2040 printk(KERN_CRIT "Dead loop on virtual device "
2041 "%s, fix it urgently!\n", dev->name);
2042 }
2043 }
2044
2045 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002046 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047
2048out_kfree_skb:
2049 kfree_skb(skb);
2050 return rc;
2051out:
Herbert Xud4828d82006-06-22 02:28:18 -07002052 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 return rc;
2054}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002055EXPORT_SYMBOL(dev_queue_xmit);
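
/*
 * Illustrative sketch (not part of this file): a caller handing a fully
 * built skb to dev_queue_xmit(). Construction of the packet is elided;
 * the points shown are that the caller sets dev and priority first and
 * that the skb is consumed whatever the return value.
 */
static int example_xmit(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;
	skb->priority = 0;
	return dev_queue_xmit(skb);	/* skb must not be touched afterwards */
}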
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056
2057
2058/*=======================================================================
2059 Receiver routines
2060 =======================================================================*/
2061
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002062int netdev_max_backlog __read_mostly = 1000;
2063int netdev_budget __read_mostly = 300;
2064int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
2066DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2067
2068
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069/**
2070 * netif_rx - post buffer to the network code
2071 * @skb: buffer to post
2072 *
2073 * This function receives a packet from a device driver and queues it for
2074 * the upper (protocol) levels to process. It always succeeds. The buffer
2075 * may be dropped during processing for congestion control or by the
2076 * protocol layers.
2077 *
2078 * return values:
2079 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 * NET_RX_DROP (packet was dropped)
2081 *
2082 */
2083
2084int netif_rx(struct sk_buff *skb)
2085{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 struct softnet_data *queue;
2087 unsigned long flags;
2088
2089 /* if netpoll wants it, pretend we never saw it */
2090 if (netpoll_rx(skb))
2091 return NET_RX_DROP;
2092
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002093 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002094 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095
2096 /*
2097 * The code is arranged so that the path is shortest
2098 * when the CPU is congested but still operating.
2099 */
2100 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 queue = &__get_cpu_var(softnet_data);
2102
2103 __get_cpu_var(netdev_rx_stat).total++;
2104 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2105 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07002109 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 }
2111
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002112 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 goto enqueue;
2114 }
2115
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 __get_cpu_var(netdev_rx_stat).dropped++;
2117 local_irq_restore(flags);
2118
2119 kfree_skb(skb);
2120 return NET_RX_DROP;
2121}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002122EXPORT_SYMBOL(netif_rx);
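
/*
 * Illustrative sketch (not part of this file): the classic non-NAPI
 * receive path. A real driver builds the skb from its hardware buffers;
 * only the hand-off to the stack is shown.
 */
static void example_rx_irq(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	if (netif_rx(skb) == NET_RX_DROP)
		dev->stats.rx_dropped++;
}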
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123
2124int netif_rx_ni(struct sk_buff *skb)
2125{
2126 int err;
2127
2128 preempt_disable();
2129 err = netif_rx(skb);
2130 if (local_softirq_pending())
2131 do_softirq();
2132 preempt_enable();
2133
2134 return err;
2135}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136EXPORT_SYMBOL(netif_rx_ni);
2137
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138static void net_tx_action(struct softirq_action *h)
2139{
2140 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2141
2142 if (sd->completion_queue) {
2143 struct sk_buff *clist;
2144
2145 local_irq_disable();
2146 clist = sd->completion_queue;
2147 sd->completion_queue = NULL;
2148 local_irq_enable();
2149
2150 while (clist) {
2151 struct sk_buff *skb = clist;
2152 clist = clist->next;
2153
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002154 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 __kfree_skb(skb);
2156 }
2157 }
2158
2159 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002160 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161
2162 local_irq_disable();
2163 head = sd->output_queue;
2164 sd->output_queue = NULL;
2165 local_irq_enable();
2166
2167 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002168 struct Qdisc *q = head;
2169 spinlock_t *root_lock;
2170
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 head = head->next_sched;
2172
David S. Miller5fb66222008-08-02 20:02:43 -07002173 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002174 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002175 smp_mb__before_clear_bit();
2176 clear_bit(__QDISC_STATE_SCHED,
2177 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002178 qdisc_run(q);
2179 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002181 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002182 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002183 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002184 } else {
2185 smp_mb__before_clear_bit();
2186 clear_bit(__QDISC_STATE_SCHED,
2187 &q->state);
2188 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 }
2190 }
2191 }
2192}
2193
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002194static inline int deliver_skb(struct sk_buff *skb,
2195 struct packet_type *pt_prev,
2196 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197{
2198 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002199 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200}
2201
2202#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002203
2204#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2205/* This hook is defined here for ATM LANE */
2206int (*br_fdb_test_addr_hook)(struct net_device *dev,
2207 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002208EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002209#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210
Stephen Hemminger6229e362007-03-21 13:38:47 -07002211/*
2212 * If bridge module is loaded call bridging hook.
2213 * returns NULL if packet was consumed.
2214 */
2215struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2216 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002217EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002218
Stephen Hemminger6229e362007-03-21 13:38:47 -07002219static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2220 struct packet_type **pt_prev, int *ret,
2221 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222{
2223 struct net_bridge_port *port;
2224
Stephen Hemminger6229e362007-03-21 13:38:47 -07002225 if (skb->pkt_type == PACKET_LOOPBACK ||
2226 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2227 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228
2229 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002230 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002232 }
2233
Stephen Hemminger6229e362007-03-21 13:38:47 -07002234 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235}
2236#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002237#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238#endif
2239
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002240#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2241struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2242EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2243
2244static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2245 struct packet_type **pt_prev,
2246 int *ret,
2247 struct net_device *orig_dev)
2248{
2249 if (skb->dev->macvlan_port == NULL)
2250 return skb;
2251
2252 if (*pt_prev) {
2253 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2254 *pt_prev = NULL;
2255 }
2256 return macvlan_handle_frame_hook(skb);
2257}
2258#else
2259#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2260#endif
2261
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262#ifdef CONFIG_NET_CLS_ACT
2263/* TODO: Maybe we should just force sch_ingress to be compiled in
2264 * when CONFIG_NET_CLS_ACT is? Otherwise we waste a few instructions
2265 * (a compare and 2 extra stores) right now when we don't have it on
2266 * but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002267 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 * the ingress scheduler, you just can't add policies on ingress.
2269 *
2270 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002271static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002274 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002275 struct netdev_queue *rxq;
2276 int result = TC_ACT_OK;
2277 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002278
Herbert Xuf697c3e2007-10-14 00:38:47 -07002279 if (MAX_RED_LOOP < ttl++) {
2280 printk(KERN_WARNING
2281 "Redir loop detected Dropping packet (%d->%d)\n",
2282 skb->iif, dev->ifindex);
2283 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 }
2285
Herbert Xuf697c3e2007-10-14 00:38:47 -07002286 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2287 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2288
David S. Miller555353c2008-07-08 17:33:13 -07002289 rxq = &dev->rx_queue;
2290
David S. Miller83874002008-07-17 00:53:03 -07002291 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002292 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002293 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002294 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2295 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002296 spin_unlock(qdisc_lock(q));
2297 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002298
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 return result;
2300}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002301
2302static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2303 struct packet_type **pt_prev,
2304 int *ret, struct net_device *orig_dev)
2305{
David S. Miller8d50b532008-07-30 02:37:46 -07002306 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002307 goto out;
2308
2309 if (*pt_prev) {
2310 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2311 *pt_prev = NULL;
2312 } else {
2313 /* Huh? Why does turning on AF_PACKET affect this? */
2314 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2315 }
2316
2317 switch (ing_filter(skb)) {
2318 case TC_ACT_SHOT:
2319 case TC_ACT_STOLEN:
2320 kfree_skb(skb);
2321 return NULL;
2322 }
2323
2324out:
2325 skb->tc_verd = 0;
2326 return skb;
2327}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328#endif
2329
Patrick McHardybc1d0412008-07-14 22:49:30 -07002330/*
2331 * netif_nit_deliver - deliver received packets to network taps
2332 * @skb: buffer
2333 *
2334 * This function is used to deliver incoming packets to network
2335 * taps. It should be used when the normal netif_receive_skb path
2336 * is bypassed, for example because of VLAN acceleration.
2337 */
2338void netif_nit_deliver(struct sk_buff *skb)
2339{
2340 struct packet_type *ptype;
2341
2342 if (list_empty(&ptype_all))
2343 return;
2344
2345 skb_reset_network_header(skb);
2346 skb_reset_transport_header(skb);
2347 skb->mac_len = skb->network_header - skb->mac_header;
2348
2349 rcu_read_lock();
2350 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2351 if (!ptype->dev || ptype->dev == skb->dev)
2352 deliver_skb(skb, ptype, skb->dev);
2353 }
2354 rcu_read_unlock();
2355}
2356
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002357/**
2358 * netif_receive_skb - process receive buffer from network
2359 * @skb: buffer to process
2360 *
2361 * netif_receive_skb() is the main receive data processing function.
2362 * It always succeeds. The buffer may be dropped during processing
2363 * for congestion control or by the protocol layers.
2364 *
2365 * This function may only be called from softirq context and interrupts
2366 * should be enabled.
2367 *
2368 * Return values (usually ignored):
2369 * NET_RX_SUCCESS: no congestion
2370 * NET_RX_DROP: packet was dropped
2371 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372int netif_receive_skb(struct sk_buff *skb)
2373{
2374 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002375 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002376 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 int ret = NET_RX_DROP;
Al Viro252e33462006-11-14 20:48:11 -08002378 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002380 if (!skb->tstamp.tv64)
2381 net_timestamp(skb);
2382
Eric Dumazet05423b22009-10-26 18:40:35 -07002383 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002384 return NET_RX_SUCCESS;
2385
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002387 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 return NET_RX_DROP;
2389
Patrick McHardyc01003c2007-03-29 11:46:52 -07002390 if (!skb->iif)
2391 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002392
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002393 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002394 orig_dev = skb->dev;
2395 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002396 if (skb_bond_should_drop(skb))
2397 null_or_orig = orig_dev; /* deliver only exact match */
2398 else
2399 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002400 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002401
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402 __get_cpu_var(netdev_rx_stat).total++;
2403
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002404 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002405 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002406 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407
2408 pt_prev = NULL;
2409
2410 rcu_read_lock();
2411
2412#ifdef CONFIG_NET_CLS_ACT
2413 if (skb->tc_verd & TC_NCLS) {
2414 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2415 goto ncls;
2416 }
2417#endif
2418
2419 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002420 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2421 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002422 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002423 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 pt_prev = ptype;
2425 }
2426 }
2427
2428#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002429 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2430 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432ncls:
2433#endif
2434
Stephen Hemminger6229e362007-03-21 13:38:47 -07002435 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2436 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002438 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2439 if (!skb)
2440 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441
2442 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002443 list_for_each_entry_rcu(ptype,
2444 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002446 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2447 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002448 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002449 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 pt_prev = ptype;
2451 }
2452 }
2453
2454 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002455 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 } else {
2457 kfree_skb(skb);
2458 /* Jamal, now you will not be able to escape explaining
2459 * to me how you were going to use this. :-)
2460 */
2461 ret = NET_RX_DROP;
2462 }
2463
2464out:
2465 rcu_read_unlock();
2466 return ret;
2467}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002468EXPORT_SYMBOL(netif_receive_skb);
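
/*
 * Illustrative sketch (not part of this file): a NAPI poll routine
 * calling netif_receive_skb() from softirq context. example_hw_rx() is
 * a hypothetical stand-in for dequeueing one received frame from the
 * hardware ring.
 */
static struct sk_buff *example_hw_rx(struct net_device *dev);

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = 0;
	struct sk_buff *skb;

	while (work < budget && (skb = example_hw_rx(napi->dev)) != NULL) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		netif_receive_skb(skb);
		work++;
	}
	if (work < budget)
		napi_complete(napi);	/* re-enable device interrupts here */
	return work;
}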
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002470/* Network device is going away, flush any packets still pending */
2471static void flush_backlog(void *arg)
2472{
2473 struct net_device *dev = arg;
2474 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2475 struct sk_buff *skb, *tmp;
2476
2477 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2478 if (skb->dev == dev) {
2479 __skb_unlink(skb, &queue->input_pkt_queue);
2480 kfree_skb(skb);
2481 }
2482}
2483
Herbert Xud565b0a2008-12-15 23:38:52 -08002484static int napi_gro_complete(struct sk_buff *skb)
2485{
2486 struct packet_type *ptype;
2487 __be16 type = skb->protocol;
2488 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2489 int err = -ENOENT;
2490
Herbert Xufc59f9a2009-04-14 15:11:06 -07002491 if (NAPI_GRO_CB(skb)->count == 1) {
2492 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002493 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002494 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002495
2496 rcu_read_lock();
2497 list_for_each_entry_rcu(ptype, head, list) {
2498 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2499 continue;
2500
2501 err = ptype->gro_complete(skb);
2502 break;
2503 }
2504 rcu_read_unlock();
2505
2506 if (err) {
2507 WARN_ON(&ptype->list == head);
2508 kfree_skb(skb);
2509 return NET_RX_SUCCESS;
2510 }
2511
2512out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002513 return netif_receive_skb(skb);
2514}
2515
2516void napi_gro_flush(struct napi_struct *napi)
2517{
2518 struct sk_buff *skb, *next;
2519
2520 for (skb = napi->gro_list; skb; skb = next) {
2521 next = skb->next;
2522 skb->next = NULL;
2523 napi_gro_complete(skb);
2524 }
2525
Herbert Xu4ae55442009-02-08 18:00:36 +00002526 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002527 napi->gro_list = NULL;
2528}
2529EXPORT_SYMBOL(napi_gro_flush);
2530
Ben Hutchings5b252f02009-10-29 07:17:09 +00002531enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002532{
2533 struct sk_buff **pp = NULL;
2534 struct packet_type *ptype;
2535 __be16 type = skb->protocol;
2536 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002537 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002538 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002539 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002540
2541 if (!(skb->dev->features & NETIF_F_GRO))
2542 goto normal;
2543
David S. Miller4cf704f2009-06-09 00:18:51 -07002544 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002545 goto normal;
2546
Herbert Xud565b0a2008-12-15 23:38:52 -08002547 rcu_read_lock();
2548 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002549 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2550 continue;
2551
Herbert Xu86911732009-01-29 14:19:50 +00002552 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002553 mac_len = skb->network_header - skb->mac_header;
2554 skb->mac_len = mac_len;
2555 NAPI_GRO_CB(skb)->same_flow = 0;
2556 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002557 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002558
Herbert Xud565b0a2008-12-15 23:38:52 -08002559 pp = ptype->gro_receive(&napi->gro_list, skb);
2560 break;
2561 }
2562 rcu_read_unlock();
2563
2564 if (&ptype->list == head)
2565 goto normal;
2566
Herbert Xu0da2afd52008-12-26 14:57:42 -08002567 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002568 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002569
Herbert Xud565b0a2008-12-15 23:38:52 -08002570 if (pp) {
2571 struct sk_buff *nskb = *pp;
2572
2573 *pp = nskb->next;
2574 nskb->next = NULL;
2575 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002576 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002577 }
2578
Herbert Xu0da2afd52008-12-26 14:57:42 -08002579 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002580 goto ok;
2581
Herbert Xu4ae55442009-02-08 18:00:36 +00002582 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002583 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002584
Herbert Xu4ae55442009-02-08 18:00:36 +00002585 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002586 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002587 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002588 skb->next = napi->gro_list;
2589 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002590 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002591
Herbert Xuad0f9902009-02-01 01:24:55 -08002592pull:
Herbert Xucb189782009-05-26 18:50:31 +00002593 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2594 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2595
2596 BUG_ON(skb->end - skb->tail < grow);
2597
2598 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2599
2600 skb->tail += grow;
2601 skb->data_len -= grow;
2602
2603 skb_shinfo(skb)->frags[0].page_offset += grow;
2604 skb_shinfo(skb)->frags[0].size -= grow;
2605
2606 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2607 put_page(skb_shinfo(skb)->frags[0].page);
2608 memmove(skb_shinfo(skb)->frags,
2609 skb_shinfo(skb)->frags + 1,
2610 --skb_shinfo(skb)->nr_frags);
2611 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002612 }
2613
Herbert Xud565b0a2008-12-15 23:38:52 -08002614ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002615 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002616
2617normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002618 ret = GRO_NORMAL;
2619 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002620}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002621EXPORT_SYMBOL(dev_gro_receive);
2622
Ben Hutchings5b252f02009-10-29 07:17:09 +00002623static gro_result_t
2624__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002625{
2626 struct sk_buff *p;
2627
Herbert Xud1c76af2009-03-16 10:50:02 -07002628 if (netpoll_rx_on(skb))
2629 return GRO_NORMAL;
2630
Herbert Xu96e93ea2009-01-06 10:49:34 -08002631 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002632 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2633 && !compare_ether_header(skb_mac_header(p),
2634 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002635 NAPI_GRO_CB(p)->flush = 0;
2636 }
2637
2638 return dev_gro_receive(napi, skb);
2639}
Herbert Xu5d38a072009-01-04 16:13:40 -08002640
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002641gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002642{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002643 switch (ret) {
2644 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002645 if (netif_receive_skb(skb))
2646 ret = GRO_DROP;
2647 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002648
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002649 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002650 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002651 kfree_skb(skb);
2652 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002653
2654 case GRO_HELD:
2655 case GRO_MERGED:
2656 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002657 }
2658
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002659 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002660}
2661EXPORT_SYMBOL(napi_skb_finish);
2662
Herbert Xu78a478d2009-05-26 18:50:21 +00002663void skb_gro_reset_offset(struct sk_buff *skb)
2664{
2665 NAPI_GRO_CB(skb)->data_offset = 0;
2666 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002667 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002668
Herbert Xu78d3fd02009-05-26 18:50:23 +00002669 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002670 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002671 NAPI_GRO_CB(skb)->frag0 =
2672 page_address(skb_shinfo(skb)->frags[0].page) +
2673 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002674 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2675 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002676}
2677EXPORT_SYMBOL(skb_gro_reset_offset);
2678
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002679gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002680{
Herbert Xu86911732009-01-29 14:19:50 +00002681 skb_gro_reset_offset(skb);
2682
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002683 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002684}
2685EXPORT_SYMBOL(napi_gro_receive);
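/*
 * Illustrative sketch (not part of this file): how a NAPI driver's poll
 * routine typically feeds received frames into GRO.  The names
 * mydrv_poll(), mydrv_queue and mydrv_next_rx_skb() are hypothetical and
 * only show the calling convention.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_queue *q = container_of(napi, struct mydrv_queue, napi);
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = mydrv_next_rx_skb(q);
 *
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */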
2686
Herbert Xu96e93ea2009-01-06 10:49:34 -08002687void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2688{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002689 __skb_pull(skb, skb_headlen(skb));
2690 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2691
2692 napi->skb = skb;
2693}
2694EXPORT_SYMBOL(napi_reuse_skb);
2695
Herbert Xu76620aa2009-04-16 02:02:07 -07002696struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002697{
Herbert Xu5d38a072009-01-04 16:13:40 -08002698 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002699
2700 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00002701 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2702 if (skb)
2703 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002704 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08002705 return skb;
2706}
Herbert Xu76620aa2009-04-16 02:02:07 -07002707EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002708
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002709gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2710 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002711{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002712 switch (ret) {
2713 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002714 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002715 skb->protocol = eth_type_trans(skb, napi->dev);
2716
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002717 if (ret == GRO_HELD)
2718 skb_gro_pull(skb, -ETH_HLEN);
2719 else if (netif_receive_skb(skb))
2720 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00002721 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002722
2723 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002724 case GRO_MERGED_FREE:
2725 napi_reuse_skb(napi, skb);
2726 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002727
2728 case GRO_MERGED:
2729 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002730 }
2731
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002732 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002733}
2734EXPORT_SYMBOL(napi_frags_finish);
2735
Herbert Xu76620aa2009-04-16 02:02:07 -07002736struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002737{
Herbert Xu76620aa2009-04-16 02:02:07 -07002738 struct sk_buff *skb = napi->skb;
2739 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002740 unsigned int hlen;
2741 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002742
2743 napi->skb = NULL;
2744
2745 skb_reset_mac_header(skb);
2746 skb_gro_reset_offset(skb);
2747
Herbert Xua5b1cf22009-05-26 18:50:28 +00002748 off = skb_gro_offset(skb);
2749 hlen = off + sizeof(*eth);
2750 eth = skb_gro_header_fast(skb, off);
2751 if (skb_gro_header_hard(skb, hlen)) {
2752 eth = skb_gro_header_slow(skb, hlen, off);
2753 if (unlikely(!eth)) {
2754 napi_reuse_skb(napi, skb);
2755 skb = NULL;
2756 goto out;
2757 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002758 }
2759
2760 skb_gro_pull(skb, sizeof(*eth));
2761
2762 /*
2763 * This works because the only protocols we care about don't require
2764 * special handling. We'll fix it up properly at the end.
2765 */
2766 skb->protocol = eth->h_proto;
2767
2768out:
2769 return skb;
2770}
2771EXPORT_SYMBOL(napi_frags_skb);
2772
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002773gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07002774{
2775 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002776
2777 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002778 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002779
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002780 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002781}
2782EXPORT_SYMBOL(napi_gro_frags);
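/*
 * Illustrative sketch: the page-based receive path.  A driver that DMAs
 * into pages can borrow the skb cached by napi_get_frags(), attach its
 * fragment and let napi_gro_frags() work out the protocol headers.
 * "page", "offset" and "len" stand for hypothetical descriptor state.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;		(allocation failed, frame is dropped)
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += len;
 *	napi_gro_frags(napi);
 */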
2783
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002784static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785{
2786 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2788 unsigned long start_time = jiffies;
2789
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002790 napi->weight = weight_p;
2791 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793
2794 local_irq_disable();
2795 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002796 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002797 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002798 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002799 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002800 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801 local_irq_enable();
2802
Herbert Xu8f1ead22009-03-26 00:59:10 -07002803 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002804 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002806 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807}
2808
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002809/**
2810 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002811 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002812 *
2813 * The entry's receive function will be scheduled to run
2814 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002815void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002816{
2817 unsigned long flags;
2818
2819 local_irq_save(flags);
2820 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2821 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2822 local_irq_restore(flags);
2823}
2824EXPORT_SYMBOL(__napi_schedule);
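/*
 * Illustrative sketch: __napi_schedule() is normally reached via
 * napi_schedule() from a device interrupt handler.  mydrv_intr() and
 * mydrv_mask_rx_irq() are hypothetical driver names.
 *
 *	static irqreturn_t mydrv_intr(int irq, void *dev_id)
 *	{
 *		struct mydrv_queue *q = dev_id;
 *
 *		if (napi_schedule_prep(&q->napi)) {
 *			mydrv_mask_rx_irq(q);
 *			__napi_schedule(&q->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */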
2825
Herbert Xud565b0a2008-12-15 23:38:52 -08002826void __napi_complete(struct napi_struct *n)
2827{
2828 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2829 BUG_ON(n->gro_list);
2830
2831 list_del(&n->poll_list);
2832 smp_mb__before_clear_bit();
2833 clear_bit(NAPI_STATE_SCHED, &n->state);
2834}
2835EXPORT_SYMBOL(__napi_complete);
2836
2837void napi_complete(struct napi_struct *n)
2838{
2839 unsigned long flags;
2840
2841 /*
2842 * don't let napi dequeue from the cpu poll list
 2843	 * just in case it's running on a different cpu
2844 */
2845 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2846 return;
2847
2848 napi_gro_flush(n);
2849 local_irq_save(flags);
2850 __napi_complete(n);
2851 local_irq_restore(flags);
2852}
2853EXPORT_SYMBOL(napi_complete);
2854
2855void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2856 int (*poll)(struct napi_struct *, int), int weight)
2857{
2858 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002859 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002860 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002861 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002862 napi->poll = poll;
2863 napi->weight = weight;
2864 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002865 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002866#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002867 spin_lock_init(&napi->poll_lock);
2868 napi->poll_owner = -1;
2869#endif
2870 set_bit(NAPI_STATE_SCHED, &napi->state);
2871}
2872EXPORT_SYMBOL(netif_napi_add);
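/*
 * Illustrative sketch: the usual NAPI lifecycle from a driver's point of
 * view.  The callbacks are hypothetical; 64 is merely a common weight.
 *
 *	netif_napi_add(netdev, &q->napi, mydrv_poll, 64);	(probe)
 *	napi_enable(&q->napi);					(ndo_open)
 *	...
 *	napi_disable(&q->napi);					(ndo_stop)
 *	netif_napi_del(&q->napi);				(remove)
 */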
2873
2874void netif_napi_del(struct napi_struct *napi)
2875{
2876 struct sk_buff *skb, *next;
2877
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002878 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002879 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002880
2881 for (skb = napi->gro_list; skb; skb = next) {
2882 next = skb->next;
2883 skb->next = NULL;
2884 kfree_skb(skb);
2885 }
2886
2887 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002888 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002889}
2890EXPORT_SYMBOL(netif_napi_del);
2891
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002892
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893static void net_rx_action(struct softirq_action *h)
2894{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002895 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002896 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002897 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002898 void *have;
2899
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 local_irq_disable();
2901
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002902 while (!list_empty(list)) {
2903 struct napi_struct *n;
2904 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002906		/* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002907		 * Allow this to run for 2 jiffies since that will allow
2908 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002909 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002910 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 goto softnet_break;
2912
2913 local_irq_enable();
2914
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002915 /* Even though interrupts have been re-enabled, this
2916 * access is safe because interrupts can only add new
2917 * entries to the tail of this list, and only ->poll()
2918 * calls can remove this head entry from the list.
2919 */
2920 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002922 have = netpoll_poll_lock(n);
2923
2924 weight = n->weight;
2925
David S. Miller0a7606c2007-10-29 21:28:47 -07002926 /* This NAPI_STATE_SCHED test is for avoiding a race
2927 * with netpoll's poll_napi(). Only the entity which
2928 * obtains the lock and sees NAPI_STATE_SCHED set will
2929 * actually make the ->poll() call. Therefore we avoid
 2930		 * accidentally calling ->poll() when NAPI is not scheduled.
2931 */
2932 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002933 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002934 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002935 trace_napi_poll(n);
2936 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002937
2938 WARN_ON_ONCE(work > weight);
2939
2940 budget -= work;
2941
2942 local_irq_disable();
2943
2944 /* Drivers must not modify the NAPI state if they
2945 * consume the entire weight. In such cases this code
2946 * still "owns" the NAPI instance and therefore can
2947 * move the instance around on the list at-will.
2948 */
David S. Millerfed17f32008-01-07 21:00:40 -08002949 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07002950 if (unlikely(napi_disable_pending(n))) {
2951 local_irq_enable();
2952 napi_complete(n);
2953 local_irq_disable();
2954 } else
David S. Millerfed17f32008-01-07 21:00:40 -08002955 list_move_tail(&n->poll_list, list);
2956 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002957
2958 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 }
2960out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002961 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002962
Chris Leechdb217332006-06-17 21:24:58 -07002963#ifdef CONFIG_NET_DMA
2964 /*
2965 * There may not be any more sk_buffs coming right now, so push
2966 * any pending DMA copies to hardware
2967 */
Dan Williams2ba05622009-01-06 11:38:14 -07002968 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002969#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002970
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 return;
2972
2973softnet_break:
2974 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2975 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2976 goto out;
2977}
2978
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002979static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980
2981/**
2982 * register_gifconf - register a SIOCGIF handler
2983 * @family: Address family
2984 * @gifconf: Function handler
2985 *
2986 * Register protocol dependent address dumping routines. The handler
2987 * that is passed must not be freed or reused until it has been replaced
2988 * by another handler.
2989 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002990int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991{
2992 if (family >= NPROTO)
2993 return -EINVAL;
2994 gifconf_list[family] = gifconf;
2995 return 0;
2996}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002997EXPORT_SYMBOL(register_gifconf);
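/*
 * For reference, IPv4 registers its SIOCGIFCONF handler this way from
 * net/ipv4/devinet.c (shown here purely as a usage illustration):
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */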
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998
2999
3000/*
3001 * Map an interface index to its name (SIOCGIFNAME)
3002 */
3003
3004/*
3005 * We need this ioctl for efficient implementation of the
3006 * if_indextoname() function required by the IPv6 API. Without
3007 * it, we would have to search all the interfaces to find a
3008 * match. --pb
3009 */
3010
Eric W. Biederman881d9662007-09-17 11:56:21 -07003011static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012{
3013 struct net_device *dev;
3014 struct ifreq ifr;
3015
3016 /*
3017 * Fetch the caller's info block.
3018 */
3019
3020 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3021 return -EFAULT;
3022
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003023 rcu_read_lock();
3024 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003026 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027 return -ENODEV;
3028 }
3029
3030 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003031 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003032
3033 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3034 return -EFAULT;
3035 return 0;
3036}
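/*
 * Illustrative user-space counterpart of the ioctl above, roughly what
 * if_indextoname() boils down to ("fd" is any socket descriptor):
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = 2;
 *	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("ifindex 2 is %s\n", ifr.ifr_name);
 */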
3037
3038/*
3039 * Perform a SIOCGIFCONF call. This structure will change
3040 * size eventually, and there is nothing I can do about it.
3041 * Thus we will need a 'compatibility mode'.
3042 */
3043
Eric W. Biederman881d9662007-09-17 11:56:21 -07003044static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045{
3046 struct ifconf ifc;
3047 struct net_device *dev;
3048 char __user *pos;
3049 int len;
3050 int total;
3051 int i;
3052
3053 /*
3054 * Fetch the caller's info block.
3055 */
3056
3057 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3058 return -EFAULT;
3059
3060 pos = ifc.ifc_buf;
3061 len = ifc.ifc_len;
3062
3063 /*
3064 * Loop over the interfaces, and write an info block for each.
3065 */
3066
3067 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003068 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069 for (i = 0; i < NPROTO; i++) {
3070 if (gifconf_list[i]) {
3071 int done;
3072 if (!pos)
3073 done = gifconf_list[i](dev, NULL, 0);
3074 else
3075 done = gifconf_list[i](dev, pos + total,
3076 len - total);
3077 if (done < 0)
3078 return -EFAULT;
3079 total += done;
3080 }
3081 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003082 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083
3084 /*
3085 * All done. Write the updated control block back to the caller.
3086 */
3087 ifc.ifc_len = total;
3088
3089 /*
3090 * Both BSD and Solaris return 0 here, so we do too.
3091 */
3092 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3093}
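/*
 * Illustrative user-space caller of SIOCGIFCONF ("fd" is any socket
 * descriptor; error handling elided):
 *
 *	char buf[4096];
 *	struct ifconf ifc;
 *
 *	ifc.ifc_len = sizeof(buf);
 *	ifc.ifc_buf = buf;
 *	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0)
 *		...	(walk ifc.ifc_len / sizeof(struct ifreq) entries)
 */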
3094
3095#ifdef CONFIG_PROC_FS
3096/*
3097 * This is invoked by the /proc filesystem handler to display a device
3098 * in detail.
3099 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003100void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003101 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003102{
Denis V. Luneve372c412007-11-19 22:31:54 -08003103 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003104 loff_t off;
3105 struct net_device *dev;
3106
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003107 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07003108 if (!*pos)
3109 return SEQ_START_TOKEN;
3110
3111 off = 1;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003112 for_each_netdev_rcu(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003113 if (off++ == *pos)
3114 return dev;
3115
3116 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117}
3118
3119void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3120{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003121 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3122 first_net_device(seq_file_net(seq)) :
3123 next_net_device((struct net_device *)v);
3124
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125 ++*pos;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003126 return rcu_dereference(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003127}
3128
3129void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003130 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003132 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133}
3134
3135static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3136{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003137 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003138
Rusty Russell5a1b5892007-04-28 21:04:03 -07003139 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3140 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3141 dev->name, stats->rx_bytes, stats->rx_packets,
3142 stats->rx_errors,
3143 stats->rx_dropped + stats->rx_missed_errors,
3144 stats->rx_fifo_errors,
3145 stats->rx_length_errors + stats->rx_over_errors +
3146 stats->rx_crc_errors + stats->rx_frame_errors,
3147 stats->rx_compressed, stats->multicast,
3148 stats->tx_bytes, stats->tx_packets,
3149 stats->tx_errors, stats->tx_dropped,
3150 stats->tx_fifo_errors, stats->collisions,
3151 stats->tx_carrier_errors +
3152 stats->tx_aborted_errors +
3153 stats->tx_window_errors +
3154 stats->tx_heartbeat_errors,
3155 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156}
3157
3158/*
3159 * Called from the PROCfs module. This now uses the new arbitrary sized
3160 * /proc/net interface to create /proc/net/dev
3161 */
3162static int dev_seq_show(struct seq_file *seq, void *v)
3163{
3164 if (v == SEQ_START_TOKEN)
3165 seq_puts(seq, "Inter-| Receive "
3166 " | Transmit\n"
3167 " face |bytes packets errs drop fifo frame "
3168 "compressed multicast|bytes packets errs "
3169 "drop fifo colls carrier compressed\n");
3170 else
3171 dev_seq_printf_stats(seq, v);
3172 return 0;
3173}
3174
3175static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3176{
3177 struct netif_rx_stats *rc = NULL;
3178
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003179 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003180 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181 rc = &per_cpu(netdev_rx_stat, *pos);
3182 break;
3183 } else
3184 ++*pos;
3185 return rc;
3186}
3187
3188static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3189{
3190 return softnet_get_online(pos);
3191}
3192
3193static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3194{
3195 ++*pos;
3196 return softnet_get_online(pos);
3197}
3198
3199static void softnet_seq_stop(struct seq_file *seq, void *v)
3200{
3201}
3202
3203static int softnet_seq_show(struct seq_file *seq, void *v)
3204{
3205 struct netif_rx_stats *s = v;
3206
3207 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003208 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003209 0, 0, 0, 0, /* was fastroute */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003210 s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211 return 0;
3212}
3213
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003214static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215 .start = dev_seq_start,
3216 .next = dev_seq_next,
3217 .stop = dev_seq_stop,
3218 .show = dev_seq_show,
3219};
3220
3221static int dev_seq_open(struct inode *inode, struct file *file)
3222{
Denis V. Luneve372c412007-11-19 22:31:54 -08003223 return seq_open_net(inode, file, &dev_seq_ops,
3224 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225}
3226
Arjan van de Ven9a321442007-02-12 00:55:35 -08003227static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 .owner = THIS_MODULE,
3229 .open = dev_seq_open,
3230 .read = seq_read,
3231 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003232 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233};
3234
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003235static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236 .start = softnet_seq_start,
3237 .next = softnet_seq_next,
3238 .stop = softnet_seq_stop,
3239 .show = softnet_seq_show,
3240};
3241
3242static int softnet_seq_open(struct inode *inode, struct file *file)
3243{
3244 return seq_open(file, &softnet_seq_ops);
3245}
3246
Arjan van de Ven9a321442007-02-12 00:55:35 -08003247static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248 .owner = THIS_MODULE,
3249 .open = softnet_seq_open,
3250 .read = seq_read,
3251 .llseek = seq_lseek,
3252 .release = seq_release,
3253};
3254
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003255static void *ptype_get_idx(loff_t pos)
3256{
3257 struct packet_type *pt = NULL;
3258 loff_t i = 0;
3259 int t;
3260
3261 list_for_each_entry_rcu(pt, &ptype_all, list) {
3262 if (i == pos)
3263 return pt;
3264 ++i;
3265 }
3266
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003267 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003268 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3269 if (i == pos)
3270 return pt;
3271 ++i;
3272 }
3273 }
3274 return NULL;
3275}
3276
3277static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a422008-01-21 02:27:29 -08003278 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003279{
3280 rcu_read_lock();
3281 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3282}
3283
3284static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3285{
3286 struct packet_type *pt;
3287 struct list_head *nxt;
3288 int hash;
3289
3290 ++*pos;
3291 if (v == SEQ_START_TOKEN)
3292 return ptype_get_idx(0);
3293
3294 pt = v;
3295 nxt = pt->list.next;
3296 if (pt->type == htons(ETH_P_ALL)) {
3297 if (nxt != &ptype_all)
3298 goto found;
3299 hash = 0;
3300 nxt = ptype_base[0].next;
3301 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003302 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003303
3304 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003305 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003306 return NULL;
3307 nxt = ptype_base[hash].next;
3308 }
3309found:
3310 return list_entry(nxt, struct packet_type, list);
3311}
3312
3313static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a422008-01-21 02:27:29 -08003314 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003315{
3316 rcu_read_unlock();
3317}
3318
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003319static int ptype_seq_show(struct seq_file *seq, void *v)
3320{
3321 struct packet_type *pt = v;
3322
3323 if (v == SEQ_START_TOKEN)
3324 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003325 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003326 if (pt->type == htons(ETH_P_ALL))
3327 seq_puts(seq, "ALL ");
3328 else
3329 seq_printf(seq, "%04x", ntohs(pt->type));
3330
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003331 seq_printf(seq, " %-8s %pF\n",
3332 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003333 }
3334
3335 return 0;
3336}
3337
3338static const struct seq_operations ptype_seq_ops = {
3339 .start = ptype_seq_start,
3340 .next = ptype_seq_next,
3341 .stop = ptype_seq_stop,
3342 .show = ptype_seq_show,
3343};
3344
3345static int ptype_seq_open(struct inode *inode, struct file *file)
3346{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003347 return seq_open_net(inode, file, &ptype_seq_ops,
3348 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003349}
3350
3351static const struct file_operations ptype_seq_fops = {
3352 .owner = THIS_MODULE,
3353 .open = ptype_seq_open,
3354 .read = seq_read,
3355 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003356 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003357};
3358
3359
Pavel Emelyanov46650792007-10-08 20:38:39 -07003360static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361{
3362 int rc = -ENOMEM;
3363
Eric W. Biederman881d9662007-09-17 11:56:21 -07003364 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003366 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003368 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003369 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003370
Eric W. Biederman881d9662007-09-17 11:56:21 -07003371 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003372 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 rc = 0;
3374out:
3375 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003376out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003377 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003379 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003381 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382 goto out;
3383}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003384
Pavel Emelyanov46650792007-10-08 20:38:39 -07003385static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003386{
3387 wext_proc_exit(net);
3388
3389 proc_net_remove(net, "ptype");
3390 proc_net_remove(net, "softnet_stat");
3391 proc_net_remove(net, "dev");
3392}
3393
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003394static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003395 .init = dev_proc_net_init,
3396 .exit = dev_proc_net_exit,
3397};
3398
3399static int __init dev_proc_init(void)
3400{
3401 return register_pernet_subsys(&dev_proc_ops);
3402}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403#else
3404#define dev_proc_init() 0
3405#endif /* CONFIG_PROC_FS */
3406
3407
3408/**
3409 * netdev_set_master - set up master/slave pair
3410 * @slave: slave device
3411 * @master: new master device
3412 *
3413 * Changes the master device of the slave. Pass %NULL to break the
3414 * bonding. The caller must hold the RTNL semaphore. On a failure
3415 * a negative errno code is returned. On success the reference counts
3416 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3417 * function returns zero.
3418 */
3419int netdev_set_master(struct net_device *slave, struct net_device *master)
3420{
3421 struct net_device *old = slave->master;
3422
3423 ASSERT_RTNL();
3424
3425 if (master) {
3426 if (old)
3427 return -EBUSY;
3428 dev_hold(master);
3429 }
3430
3431 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003432
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433 synchronize_net();
3434
3435 if (old)
3436 dev_put(old);
3437
3438 if (master)
3439 slave->flags |= IFF_SLAVE;
3440 else
3441 slave->flags &= ~IFF_SLAVE;
3442
3443 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3444 return 0;
3445}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003446EXPORT_SYMBOL(netdev_set_master);
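/*
 * Illustrative sketch: how a bonding-style driver enslaves and releases
 * a device with the helper above (error handling elided):
 *
 *	rtnl_lock();
 *	err = netdev_set_master(slave_dev, bond_dev);	(enslave)
 *	...
 *	netdev_set_master(slave_dev, NULL);		(break the pairing)
 *	rtnl_unlock();
 */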
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003448static void dev_change_rx_flags(struct net_device *dev, int flags)
3449{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003450 const struct net_device_ops *ops = dev->netdev_ops;
3451
3452 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3453 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003454}
3455
Wang Chendad9b332008-06-18 01:48:28 -07003456static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003457{
3458 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003459 uid_t uid;
3460 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003461
Patrick McHardy24023452007-07-14 18:51:31 -07003462 ASSERT_RTNL();
3463
Wang Chendad9b332008-06-18 01:48:28 -07003464 dev->flags |= IFF_PROMISC;
3465 dev->promiscuity += inc;
3466 if (dev->promiscuity == 0) {
3467 /*
3468 * Avoid overflow.
 3469		 * If inc causes overflow, leave promisc untouched and return an error.
3470 */
3471 if (inc < 0)
3472 dev->flags &= ~IFF_PROMISC;
3473 else {
3474 dev->promiscuity -= inc;
3475 printk(KERN_WARNING "%s: promiscuity touches roof, "
3476 "set promiscuity failed, promiscuity feature "
3477 "of device might be broken.\n", dev->name);
3478 return -EOVERFLOW;
3479 }
3480 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003481 if (dev->flags != old_flags) {
3482 printk(KERN_INFO "device %s %s promiscuous mode\n",
3483 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3484 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003485 if (audit_enabled) {
3486 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003487 audit_log(current->audit_context, GFP_ATOMIC,
3488 AUDIT_ANOM_PROMISCUOUS,
3489 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3490 dev->name, (dev->flags & IFF_PROMISC),
3491 (old_flags & IFF_PROMISC),
3492 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003493 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003494 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003495 }
Patrick McHardy24023452007-07-14 18:51:31 -07003496
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003497 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003498 }
Wang Chendad9b332008-06-18 01:48:28 -07003499 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003500}
3501
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502/**
3503 * dev_set_promiscuity - update promiscuity count on a device
3504 * @dev: device
3505 * @inc: modifier
3506 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003507 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508 * remains above zero the interface remains promiscuous. Once it hits zero
 3509 *	the device reverts to normal filtering operation. A negative inc
3510 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003511 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512 */
Wang Chendad9b332008-06-18 01:48:28 -07003513int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514{
3515 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003516 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517
Wang Chendad9b332008-06-18 01:48:28 -07003518 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003519 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003520 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003521 if (dev->flags != old_flags)
3522 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003523 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003525EXPORT_SYMBOL(dev_set_promiscuity);
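/*
 * Illustrative sketch: a subsystem that needs to see all traffic takes a
 * promiscuity reference and later drops it symmetrically.  RTNL must be
 * held, as the ASSERT_RTNL() above enforces.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	(take a reference)
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);		(release it)
 *	rtnl_unlock();
 */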
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526
3527/**
3528 * dev_set_allmulti - update allmulti count on a device
3529 * @dev: device
3530 * @inc: modifier
3531 *
3532 * Add or remove reception of all multicast frames to a device. While the
 3533 *	count in the device remains above zero the interface remains listening
 3534 *	to all multicast frames. Once it hits zero the device reverts to normal
3535 * filtering operation. A negative @inc value is used to drop the counter
3536 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003537 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 */
3539
Wang Chendad9b332008-06-18 01:48:28 -07003540int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541{
3542 unsigned short old_flags = dev->flags;
3543
Patrick McHardy24023452007-07-14 18:51:31 -07003544 ASSERT_RTNL();
3545
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003547 dev->allmulti += inc;
3548 if (dev->allmulti == 0) {
3549 /*
3550 * Avoid overflow.
 3551		 * If inc causes overflow, leave allmulti untouched and return an error.
3552 */
3553 if (inc < 0)
3554 dev->flags &= ~IFF_ALLMULTI;
3555 else {
3556 dev->allmulti -= inc;
3557 printk(KERN_WARNING "%s: allmulti touches roof, "
3558 "set allmulti failed, allmulti feature of "
3559 "device might be broken.\n", dev->name);
3560 return -EOVERFLOW;
3561 }
3562 }
Patrick McHardy24023452007-07-14 18:51:31 -07003563 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003564 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003565 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003566 }
Wang Chendad9b332008-06-18 01:48:28 -07003567 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003568}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003569EXPORT_SYMBOL(dev_set_allmulti);
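/*
 * Illustrative sketch: routing daemons and tunnel drivers use the same
 * reference-counted pattern for all-multicast reception (caller holds
 * RTNL, per the ASSERT_RTNL() above):
 *
 *	dev_set_allmulti(dev, 1);	(start receiving all multicasts)
 *	...
 *	dev_set_allmulti(dev, -1);	(stop)
 */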
Patrick McHardy4417da62007-06-27 01:28:10 -07003570
3571/*
3572 * Upload unicast and multicast address lists to device and
3573 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003574 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003575 * are present.
3576 */
3577void __dev_set_rx_mode(struct net_device *dev)
3578{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003579 const struct net_device_ops *ops = dev->netdev_ops;
3580
Patrick McHardy4417da62007-06-27 01:28:10 -07003581 /* dev_open will call this function so the list will stay sane. */
3582 if (!(dev->flags&IFF_UP))
3583 return;
3584
3585 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003586 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003587
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003588 if (ops->ndo_set_rx_mode)
3589 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003590 else {
 3591		/* Unicast address changes may only happen under the rtnl,
3592 * therefore calling __dev_set_promiscuity here is safe.
3593 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003594 if (dev->uc.count > 0 && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003595 __dev_set_promiscuity(dev, 1);
3596 dev->uc_promisc = 1;
Jiri Pirko31278e72009-06-17 01:12:19 +00003597 } else if (dev->uc.count == 0 && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003598 __dev_set_promiscuity(dev, -1);
3599 dev->uc_promisc = 0;
3600 }
3601
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003602 if (ops->ndo_set_multicast_list)
3603 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003604 }
3605}
3606
3607void dev_set_rx_mode(struct net_device *dev)
3608{
David S. Millerb9e40852008-07-15 00:15:08 -07003609 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003610 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003611 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612}
3613
Jiri Pirkof001fde2009-05-05 02:48:28 +00003614/* hw addresses list handling functions */
3615
Jiri Pirko31278e72009-06-17 01:12:19 +00003616static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3617 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003618{
3619 struct netdev_hw_addr *ha;
3620 int alloc_size;
3621
3622 if (addr_len > MAX_ADDR_LEN)
3623 return -EINVAL;
3624
Jiri Pirko31278e72009-06-17 01:12:19 +00003625 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003626 if (!memcmp(ha->addr, addr, addr_len) &&
3627 ha->type == addr_type) {
3628 ha->refcount++;
3629 return 0;
3630 }
3631 }
3632
3633
Jiri Pirkof001fde2009-05-05 02:48:28 +00003634 alloc_size = sizeof(*ha);
3635 if (alloc_size < L1_CACHE_BYTES)
3636 alloc_size = L1_CACHE_BYTES;
3637 ha = kmalloc(alloc_size, GFP_ATOMIC);
3638 if (!ha)
3639 return -ENOMEM;
3640 memcpy(ha->addr, addr, addr_len);
3641 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003642 ha->refcount = 1;
3643 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003644 list_add_tail_rcu(&ha->list, &list->list);
3645 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003646 return 0;
3647}
3648
3649static void ha_rcu_free(struct rcu_head *head)
3650{
3651 struct netdev_hw_addr *ha;
3652
3653 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3654 kfree(ha);
3655}
3656
Jiri Pirko31278e72009-06-17 01:12:19 +00003657static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3658 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003659{
3660 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003661
Jiri Pirko31278e72009-06-17 01:12:19 +00003662 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003663 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003664 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003665 if (--ha->refcount)
3666 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003667 list_del_rcu(&ha->list);
3668 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003669 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003670 return 0;
3671 }
3672 }
3673 return -ENOENT;
3674}
3675
Jiri Pirko31278e72009-06-17 01:12:19 +00003676static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3677 struct netdev_hw_addr_list *from_list,
3678 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003679 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003680{
3681 int err;
3682 struct netdev_hw_addr *ha, *ha2;
3683 unsigned char type;
3684
Jiri Pirko31278e72009-06-17 01:12:19 +00003685 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003686 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003687 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003688 if (err)
3689 goto unroll;
3690 }
3691 return 0;
3692
3693unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00003694 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003695 if (ha2 == ha)
3696 break;
3697 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003698 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003699 }
3700 return err;
3701}
3702
Jiri Pirko31278e72009-06-17 01:12:19 +00003703static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3704 struct netdev_hw_addr_list *from_list,
3705 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003706 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003707{
3708 struct netdev_hw_addr *ha;
3709 unsigned char type;
3710
Jiri Pirko31278e72009-06-17 01:12:19 +00003711 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003712 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003713		__hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003714 }
3715}
3716
Jiri Pirko31278e72009-06-17 01:12:19 +00003717static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3718 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003719 int addr_len)
3720{
3721 int err = 0;
3722 struct netdev_hw_addr *ha, *tmp;
3723
Jiri Pirko31278e72009-06-17 01:12:19 +00003724 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003725 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003726 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003727 addr_len, ha->type);
3728 if (err)
3729 break;
3730 ha->synced = true;
3731 ha->refcount++;
3732 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003733 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3734 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003735 }
3736 }
3737 return err;
3738}
3739
Jiri Pirko31278e72009-06-17 01:12:19 +00003740static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3741 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003742 int addr_len)
3743{
3744 struct netdev_hw_addr *ha, *tmp;
3745
Jiri Pirko31278e72009-06-17 01:12:19 +00003746 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003747 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003748 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003749 addr_len, ha->type);
3750 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003751 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003752 addr_len, ha->type);
3753 }
3754 }
3755}
3756
Jiri Pirko31278e72009-06-17 01:12:19 +00003757static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003758{
3759 struct netdev_hw_addr *ha, *tmp;
3760
Jiri Pirko31278e72009-06-17 01:12:19 +00003761 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003762 list_del_rcu(&ha->list);
3763 call_rcu(&ha->rcu_head, ha_rcu_free);
3764 }
Jiri Pirko31278e72009-06-17 01:12:19 +00003765 list->count = 0;
3766}
3767
3768static void __hw_addr_init(struct netdev_hw_addr_list *list)
3769{
3770 INIT_LIST_HEAD(&list->list);
3771 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003772}
3773
3774/* Device addresses handling functions */
3775
3776static void dev_addr_flush(struct net_device *dev)
3777{
3778 /* rtnl_mutex must be held here */
3779
Jiri Pirko31278e72009-06-17 01:12:19 +00003780 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003781 dev->dev_addr = NULL;
3782}
3783
3784static int dev_addr_init(struct net_device *dev)
3785{
3786 unsigned char addr[MAX_ADDR_LEN];
3787 struct netdev_hw_addr *ha;
3788 int err;
3789
3790 /* rtnl_mutex must be held here */
3791
Jiri Pirko31278e72009-06-17 01:12:19 +00003792 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00003793 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00003794 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003795 NETDEV_HW_ADDR_T_LAN);
3796 if (!err) {
3797 /*
3798 * Get the first (previously created) address from the list
3799 * and set dev_addr pointer to this location.
3800 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003801 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003802 struct netdev_hw_addr, list);
3803 dev->dev_addr = ha->addr;
3804 }
3805 return err;
3806}
3807
3808/**
3809 * dev_addr_add - Add a device address
3810 * @dev: device
3811 * @addr: address to add
3812 * @addr_type: address type
3813 *
3814 * Add a device address to the device or increase the reference count if
3815 * it already exists.
3816 *
3817 * The caller must hold the rtnl_mutex.
3818 */
3819int dev_addr_add(struct net_device *dev, unsigned char *addr,
3820 unsigned char addr_type)
3821{
3822 int err;
3823
3824 ASSERT_RTNL();
3825
Jiri Pirko31278e72009-06-17 01:12:19 +00003826 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003827 if (!err)
3828 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3829 return err;
3830}
3831EXPORT_SYMBOL(dev_addr_add);
3832
3833/**
3834 * dev_addr_del - Release a device address.
3835 * @dev: device
3836 * @addr: address to delete
3837 * @addr_type: address type
3838 *
3839 * Release reference to a device address and remove it from the device
3840 * if the reference count drops to zero.
3841 *
3842 * The caller must hold the rtnl_mutex.
3843 */
3844int dev_addr_del(struct net_device *dev, unsigned char *addr,
3845 unsigned char addr_type)
3846{
3847 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003848 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003849
3850 ASSERT_RTNL();
3851
Jiri Pirkoccffad252009-05-22 23:22:17 +00003852 /*
 3853	 * We cannot remove the first address from the list because
3854 * dev->dev_addr points to that.
3855 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003856 ha = list_first_entry(&dev->dev_addrs.list,
3857 struct netdev_hw_addr, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003858 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3859 return -ENOENT;
3860
Jiri Pirko31278e72009-06-17 01:12:19 +00003861 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003862 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003863 if (!err)
3864 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3865 return err;
3866}
3867EXPORT_SYMBOL(dev_addr_del);
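/*
 * Illustrative sketch of the two calls above: a driver that carries an
 * extra hardware address (e.g. a SAN MAC, as ixgbe does) can pin it
 * under RTNL and drop it again later:
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, addr, NETDEV_HW_ADDR_T_SAN);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_addr_del(dev, addr, NETDEV_HW_ADDR_T_SAN);
 *	rtnl_unlock();
 */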
3868
3869/**
3870 * dev_addr_add_multiple - Add device addresses from another device
3871 * @to_dev: device to which addresses will be added
3872 * @from_dev: device from which addresses will be added
3873 * @addr_type: address type - 0 means type will be used from from_dev
3874 *
 3875 *	Add the device addresses of one device to another.
 3876 *
3877 * The caller must hold the rtnl_mutex.
3878 */
3879int dev_addr_add_multiple(struct net_device *to_dev,
3880 struct net_device *from_dev,
3881 unsigned char addr_type)
3882{
3883 int err;
3884
3885 ASSERT_RTNL();
3886
3887 if (from_dev->addr_len != to_dev->addr_len)
3888 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003889 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003890 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003891 if (!err)
3892 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3893 return err;
3894}
3895EXPORT_SYMBOL(dev_addr_add_multiple);
3896
3897/**
3898 * dev_addr_del_multiple - Delete device addresses by another device
3899 * @to_dev: device where the addresses will be deleted
 3900 *	@from_dev: device whose addresses determine what will be deleted
 3901 *	@addr_type: address type - 0 means type will be used from from_dev
3902 *
 3903 *	Deletes addresses in the "to" device that are listed in the "from" device.
3904 *
3905 * The caller must hold the rtnl_mutex.
3906 */
3907int dev_addr_del_multiple(struct net_device *to_dev,
3908 struct net_device *from_dev,
3909 unsigned char addr_type)
3910{
3911 ASSERT_RTNL();
3912
3913 if (from_dev->addr_len != to_dev->addr_len)
3914 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003915 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003916 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003917 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3918 return 0;
3919}
3920EXPORT_SYMBOL(dev_addr_del_multiple);
3921
Jiri Pirko31278e72009-06-17 01:12:19 +00003922/* multicast addresses handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00003923
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003924int __dev_addr_delete(struct dev_addr_list **list, int *count,
3925 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003926{
3927 struct dev_addr_list *da;
3928
3929 for (; (da = *list) != NULL; list = &da->next) {
3930 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3931 alen == da->da_addrlen) {
3932 if (glbl) {
3933 int old_glbl = da->da_gusers;
3934 da->da_gusers = 0;
3935 if (old_glbl == 0)
3936 break;
3937 }
3938 if (--da->da_users)
3939 return 0;
3940
3941 *list = da->next;
3942 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003943 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003944 return 0;
3945 }
3946 }
3947 return -ENOENT;
3948}
3949
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003950int __dev_addr_add(struct dev_addr_list **list, int *count,
3951 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003952{
3953 struct dev_addr_list *da;
3954
3955 for (da = *list; da != NULL; da = da->next) {
3956 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3957 da->da_addrlen == alen) {
3958 if (glbl) {
3959 int old_glbl = da->da_gusers;
3960 da->da_gusers = 1;
3961 if (old_glbl)
3962 return 0;
3963 }
3964 da->da_users++;
3965 return 0;
3966 }
3967 }
3968
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003969 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003970 if (da == NULL)
3971 return -ENOMEM;
3972 memcpy(da->da_addr, addr, alen);
3973 da->da_addrlen = alen;
3974 da->da_users = 1;
3975 da->da_gusers = glbl ? 1 : 0;
3976 da->next = *list;
3977 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003978 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003979 return 0;
3980}
3981
Patrick McHardy4417da62007-06-27 01:28:10 -07003982/**
3983 * dev_unicast_delete - Release secondary unicast address.
3984 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003985 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07003986 *
3987 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003988 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003989 *
3990 * The caller must hold the rtnl_mutex.
3991 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003992int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003993{
3994 int err;
3995
3996 ASSERT_RTNL();
3997
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003998 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00003999 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
4000 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004001 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004002 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004003 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004004 return err;
4005}
4006EXPORT_SYMBOL(dev_unicast_delete);
4007
4008/**
4009 * dev_unicast_add - add a secondary unicast address
4010 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07004011 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07004012 *
4013 * Add a secondary unicast address to the device or increase
4014 * the reference count if it already exists.
4015 *
4016 * The caller must hold the rtnl_mutex.
4017 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00004018int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07004019{
4020 int err;
4021
4022 ASSERT_RTNL();
4023
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004024 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004025 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4026 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004027 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004028 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004029 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004030 return err;
4031}
4032EXPORT_SYMBOL(dev_unicast_add);
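/*
 * Illustrative sketch: a stacked device (macvlan does something very
 * similar in its open/stop paths) installs its own MAC as a secondary
 * unicast address on the lower device:
 *
 *	err = dev_unicast_add(lowerdev, dev->dev_addr);	(ndo_open)
 *	...
 *	dev_unicast_delete(lowerdev, dev->dev_addr);	(ndo_stop)
 */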
4033
Chris Leeche83a2ea2008-01-31 16:53:23 -08004034int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4035 struct dev_addr_list **from, int *from_count)
4036{
4037 struct dev_addr_list *da, *next;
4038 int err = 0;
4039
4040 da = *from;
4041 while (da != NULL) {
4042 next = da->next;
4043 if (!da->da_synced) {
4044 err = __dev_addr_add(to, to_count,
4045 da->da_addr, da->da_addrlen, 0);
4046 if (err < 0)
4047 break;
4048 da->da_synced = 1;
4049 da->da_users++;
4050 } else if (da->da_users == 1) {
4051 __dev_addr_delete(to, to_count,
4052 da->da_addr, da->da_addrlen, 0);
4053 __dev_addr_delete(from, from_count,
4054 da->da_addr, da->da_addrlen, 0);
4055 }
4056 da = next;
4057 }
4058 return err;
4059}
Johannes Bergc4029082009-06-17 17:43:30 +02004060EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004061
4062void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4063 struct dev_addr_list **from, int *from_count)
4064{
4065 struct dev_addr_list *da, *next;
4066
4067 da = *from;
4068 while (da != NULL) {
4069 next = da->next;
4070 if (da->da_synced) {
4071 __dev_addr_delete(to, to_count,
4072 da->da_addr, da->da_addrlen, 0);
4073 da->da_synced = 0;
4074 __dev_addr_delete(from, from_count,
4075 da->da_addr, da->da_addrlen, 0);
4076 }
4077 da = next;
4078 }
4079}
Johannes Bergc4029082009-06-17 17:43:30 +02004080EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004081
4082/**
4083 * dev_unicast_sync - Synchronize device's unicast list to another device
4084 * @to: destination device
4085 * @from: source device
4086 *
4087 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004088 * addresses that have no users left. The source device must be
4089 * locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08004090 *
4091 * This function is intended to be called from the dev->set_rx_mode
4092 * function of layered software devices.
4093 */
4094int dev_unicast_sync(struct net_device *to, struct net_device *from)
4095{
4096 int err = 0;
4097
Jiri Pirkoccffad252009-05-22 23:22:17 +00004098 if (to->addr_len != from->addr_len)
4099 return -EINVAL;
4100
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004101 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004102 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004103 if (!err)
4104 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004105 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004106 return err;
4107}
4108EXPORT_SYMBOL(dev_unicast_sync);
4109
4110/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004111 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004112 * @to: destination device
4113 * @from: source device
4114 *
4115 * Remove all addresses that were added to the destination device by
4116 * dev_unicast_sync(). This function is intended to be called from the
4117 * dev->stop function of layered software devices.
4118 */
4119void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4120{
Jiri Pirkoccffad252009-05-22 23:22:17 +00004121 if (to->addr_len != from->addr_len)
4122 return;
4123
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004124 netif_addr_lock_bh(from);
4125 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004126 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004127 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004128 netif_addr_unlock(to);
4129 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004130}
4131EXPORT_SYMBOL(dev_unicast_unsync);
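/*
 * Usage sketch (illustrative only, not part of dev.c): how a layered
 * software device, VLAN-style, might propagate its unicast list to the
 * underlying device per the kernel-doc above. struct example_priv and
 * its lowerdev member are hypothetical.
 */
struct example_priv {
	struct net_device *lowerdev;
};

static void example_set_rx_mode(struct net_device *dev)	/* ndo_set_rx_mode */
{
	struct example_priv *priv = netdev_priv(dev);

	dev_unicast_sync(priv->lowerdev, dev);
}

static int example_stop(struct net_device *dev)		/* ndo_stop */
{
	struct example_priv *priv = netdev_priv(dev);

	dev_unicast_unsync(priv->lowerdev, dev);
	return 0;
}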
4132
Jiri Pirkoccffad252009-05-22 23:22:17 +00004133static void dev_unicast_flush(struct net_device *dev)
4134{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004135 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004136 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004137 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004138}
4139
4140static void dev_unicast_init(struct net_device *dev)
4141{
Jiri Pirko31278e72009-06-17 01:12:19 +00004142 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004143}
4144
4145
Denis Cheng12972622007-07-18 02:12:56 -07004146static void __dev_addr_discard(struct dev_addr_list **list)
4147{
4148 struct dev_addr_list *tmp;
4149
4150 while (*list != NULL) {
4151 tmp = *list;
4152 *list = tmp->next;
4153 if (tmp->da_users > tmp->da_gusers)
4154		printk(KERN_ERR "__dev_addr_discard: address leakage! "
4155 "da_users=%d\n", tmp->da_users);
4156 kfree(tmp);
4157 }
4158}
4159
Denis Cheng26cc2522007-07-18 02:12:03 -07004160static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004161{
David S. Millerb9e40852008-07-15 00:15:08 -07004162 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004163
Denis Cheng456ad752007-07-18 02:10:54 -07004164 __dev_addr_discard(&dev->mc_list);
4165 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004166
David S. Millerb9e40852008-07-15 00:15:08 -07004167 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004168}
4169
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004170/**
4171 * dev_get_flags - get flags reported to userspace
4172 * @dev: device
4173 *
4174 * Get the combination of flag bits exported through APIs to userspace.
4175 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176unsigned dev_get_flags(const struct net_device *dev)
4177{
4178 unsigned flags;
4179
4180 flags = (dev->flags & ~(IFF_PROMISC |
4181 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004182 IFF_RUNNING |
4183 IFF_LOWER_UP |
4184 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 (dev->gflags & (IFF_PROMISC |
4186 IFF_ALLMULTI));
4187
Stefan Rompfb00055a2006-03-20 17:09:11 -08004188 if (netif_running(dev)) {
4189 if (netif_oper_up(dev))
4190 flags |= IFF_RUNNING;
4191 if (netif_carrier_ok(dev))
4192 flags |= IFF_LOWER_UP;
4193 if (netif_dormant(dev))
4194 flags |= IFF_DORMANT;
4195 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196
4197 return flags;
4198}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004199EXPORT_SYMBOL(dev_get_flags);
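/*
 * Sketch (illustrative only, not part of dev.c): interpreting the flag
 * word built above. IFF_UP reflects administrative state, while
 * IFF_RUNNING is reported only when the interface is operationally up.
 */
static int example_link_is_usable(const struct net_device *dev)
{
	unsigned flags = dev_get_flags(dev);

	return (flags & IFF_UP) && (flags & IFF_RUNNING);
}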
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004201/**
4202 * dev_change_flags - change device settings
4203 * @dev: device
4204 * @flags: device state flags
4205 *
4206 * Change settings on device based state flags. The flags are
4207 * in the userspace exported format.
4208 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209int dev_change_flags(struct net_device *dev, unsigned flags)
4210{
Thomas Graf7c355f52007-06-05 16:03:03 -07004211 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212 int old_flags = dev->flags;
4213
Patrick McHardy24023452007-07-14 18:51:31 -07004214 ASSERT_RTNL();
4215
Linus Torvalds1da177e2005-04-16 15:20:36 -07004216 /*
4217 * Set the flags on our device.
4218 */
4219
4220 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4221 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4222 IFF_AUTOMEDIA)) |
4223 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4224 IFF_ALLMULTI));
4225
4226 /*
4227 * Load in the correct multicast list now the flags have changed.
4228 */
4229
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004230 if ((old_flags ^ flags) & IFF_MULTICAST)
4231 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004232
Patrick McHardy4417da62007-06-27 01:28:10 -07004233 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234
4235 /*
4236	 * Have we downed the interface? We handle IFF_UP ourselves
4237 * according to user attempts to set it, rather than blindly
4238 * setting it.
4239 */
4240
4241 ret = 0;
4242 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4243 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4244
4245 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004246 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 }
4248
4249 if (dev->flags & IFF_UP &&
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004250 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004252 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253
4254 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004255 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4256
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257 dev->gflags ^= IFF_PROMISC;
4258 dev_set_promiscuity(dev, inc);
4259 }
4260
4261 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4262	   is important. Some (broken) drivers set IFF_PROMISC when
4263	   IFF_ALLMULTI is requested, without asking us and without reporting it.
4264 */
4265 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004266 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4267
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268 dev->gflags ^= IFF_ALLMULTI;
4269 dev_set_allmulti(dev, inc);
4270 }
4271
Thomas Graf7c355f52007-06-05 16:03:03 -07004272 /* Exclude state transition flags, already notified */
4273 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4274 if (changes)
4275 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276
4277 return ret;
4278}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004279EXPORT_SYMBOL(dev_change_flags);
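/*
 * Sketch (illustrative only, not part of dev.c): enabling promiscuous
 * mode through the userspace-format flags, under the RTNL lock that
 * dev_change_flags() asserts. In-kernel users would more commonly call
 * dev_set_promiscuity() directly.
 */
static int example_enable_promisc(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_PROMISC);
	rtnl_unlock();
	return err;
}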
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004281/**
4282 * dev_set_mtu - Change maximum transfer unit
4283 * @dev: device
4284 * @new_mtu: new transfer unit
4285 *
4286 * Change the maximum transfer size of the network device.
4287 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288int dev_set_mtu(struct net_device *dev, int new_mtu)
4289{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004290 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291 int err;
4292
4293 if (new_mtu == dev->mtu)
4294 return 0;
4295
4296	/* MTU must not be negative. */
4297 if (new_mtu < 0)
4298 return -EINVAL;
4299
4300 if (!netif_device_present(dev))
4301 return -ENODEV;
4302
4303 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004304 if (ops->ndo_change_mtu)
4305 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306 else
4307 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004308
Linus Torvalds1da177e2005-04-16 15:20:36 -07004309 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004310 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 return err;
4312}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004313EXPORT_SYMBOL(dev_set_mtu);
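/*
 * Sketch (illustrative only, not part of dev.c): changing the MTU from
 * inside the kernel. dev_set_mtu() takes no locks itself, so the caller
 * holds the RTNL lock, just as the ioctl path below does.
 */
static int example_set_jumbo_mtu(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);	/* driver may veto via ndo_change_mtu */
	rtnl_unlock();
	return err;
}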
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004315/**
4316 * dev_set_mac_address - Change Media Access Control Address
4317 * @dev: device
4318 * @sa: new address
4319 *
4320 * Change the hardware (MAC) address of the device
4321 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4323{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004324 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325 int err;
4326
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004327 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328 return -EOPNOTSUPP;
4329 if (sa->sa_family != dev->type)
4330 return -EINVAL;
4331 if (!netif_device_present(dev))
4332 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004333 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004335 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336 return err;
4337}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004338EXPORT_SYMBOL(dev_set_mac_address);
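/*
 * Sketch (illustrative only, not part of dev.c): programming a new MAC
 * address. The sockaddr family must match dev->type, mirroring the check
 * in dev_set_mac_address() above; the address bytes here are made up.
 */
static int example_set_mac(struct net_device *dev)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;	/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, "\x02\x12\x34\x56\x78\x9a", ETH_ALEN);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}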
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339
4340/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00004341 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004343static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004344{
4345 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00004346 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347
4348 if (!dev)
4349 return -ENODEV;
4350
4351 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004352 case SIOCGIFFLAGS: /* Get interface flags */
4353 ifr->ifr_flags = (short) dev_get_flags(dev);
4354 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004356 case SIOCGIFMETRIC: /* Get the metric on the interface
4357 (currently unused) */
4358 ifr->ifr_metric = 0;
4359 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004360
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004361 case SIOCGIFMTU: /* Get the MTU of a device */
4362 ifr->ifr_mtu = dev->mtu;
4363 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004365 case SIOCGIFHWADDR:
4366 if (!dev->addr_len)
4367 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4368 else
4369 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4370 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4371 ifr->ifr_hwaddr.sa_family = dev->type;
4372 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004374 case SIOCGIFSLAVE:
4375 err = -EINVAL;
4376 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004377
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004378 case SIOCGIFMAP:
4379 ifr->ifr_map.mem_start = dev->mem_start;
4380 ifr->ifr_map.mem_end = dev->mem_end;
4381 ifr->ifr_map.base_addr = dev->base_addr;
4382 ifr->ifr_map.irq = dev->irq;
4383 ifr->ifr_map.dma = dev->dma;
4384 ifr->ifr_map.port = dev->if_port;
4385 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004386
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004387 case SIOCGIFINDEX:
4388 ifr->ifr_ifindex = dev->ifindex;
4389 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004390
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004391 case SIOCGIFTXQLEN:
4392 ifr->ifr_qlen = dev->tx_queue_len;
4393 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004394
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004395 default:
4396 /* dev_ioctl() should ensure this case
4397 * is never reached
4398 */
4399 WARN_ON(1);
4400 err = -EINVAL;
4401 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004402
4403 }
4404 return err;
4405}
4406
4407/*
4408 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4409 */
4410static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4411{
4412 int err;
4413 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004414 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004415
4416 if (!dev)
4417 return -ENODEV;
4418
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004419 ops = dev->netdev_ops;
4420
Jeff Garzik14e3e072007-10-08 00:06:32 -07004421 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004422 case SIOCSIFFLAGS: /* Set interface flags */
4423 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004424
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004425 case SIOCSIFMETRIC: /* Set the metric on the interface
4426 (currently unused) */
4427 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004428
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004429 case SIOCSIFMTU: /* Set the MTU of a device */
4430 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004431
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004432 case SIOCSIFHWADDR:
4433 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004435 case SIOCSIFHWBROADCAST:
4436 if (ifr->ifr_hwaddr.sa_family != dev->type)
4437 return -EINVAL;
4438 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4439 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4440 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4441 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004443 case SIOCSIFMAP:
4444 if (ops->ndo_set_config) {
4445 if (!netif_device_present(dev))
4446 return -ENODEV;
4447 return ops->ndo_set_config(dev, &ifr->ifr_map);
4448 }
4449 return -EOPNOTSUPP;
4450
4451 case SIOCADDMULTI:
4452 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4453 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4454 return -EINVAL;
4455 if (!netif_device_present(dev))
4456 return -ENODEV;
4457 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4458 dev->addr_len, 1);
4459
4460 case SIOCDELMULTI:
4461 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4462 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4463 return -EINVAL;
4464 if (!netif_device_present(dev))
4465 return -ENODEV;
4466 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4467 dev->addr_len, 1);
4468
4469 case SIOCSIFTXQLEN:
4470 if (ifr->ifr_qlen < 0)
4471 return -EINVAL;
4472 dev->tx_queue_len = ifr->ifr_qlen;
4473 return 0;
4474
4475 case SIOCSIFNAME:
4476 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4477 return dev_change_name(dev, ifr->ifr_newname);
4478
4479 /*
4480 * Unknown or private ioctl
4481 */
4482 default:
4483 if ((cmd >= SIOCDEVPRIVATE &&
4484 cmd <= SIOCDEVPRIVATE + 15) ||
4485 cmd == SIOCBONDENSLAVE ||
4486 cmd == SIOCBONDRELEASE ||
4487 cmd == SIOCBONDSETHWADDR ||
4488 cmd == SIOCBONDSLAVEINFOQUERY ||
4489 cmd == SIOCBONDINFOQUERY ||
4490 cmd == SIOCBONDCHANGEACTIVE ||
4491 cmd == SIOCGMIIPHY ||
4492 cmd == SIOCGMIIREG ||
4493 cmd == SIOCSMIIREG ||
4494 cmd == SIOCBRADDIF ||
4495 cmd == SIOCBRDELIF ||
4496 cmd == SIOCSHWTSTAMP ||
4497 cmd == SIOCWANDEV) {
4498 err = -EOPNOTSUPP;
4499 if (ops->ndo_do_ioctl) {
4500 if (netif_device_present(dev))
4501 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4502 else
4503 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004505 } else
4506 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507
4508 }
4509 return err;
4510}
4511
4512/*
4513 * This function handles all "interface"-type I/O control requests. The actual
4514 * 'doing' part of this is dev_ifsioc above.
4515 */
4516
4517/**
4518 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004519 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520 * @cmd: command to issue
4521 * @arg: pointer to a struct ifreq in user space
4522 *
4523 * Issue ioctl functions to devices. This is normally called by the
4524 * user space syscall interfaces but can sometimes be useful for
4525 * other purposes. The return value is the return from the syscall if
4526 * positive or a negative errno code on error.
4527 */
4528
Eric W. Biederman881d9662007-09-17 11:56:21 -07004529int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004530{
4531 struct ifreq ifr;
4532 int ret;
4533 char *colon;
4534
4535	/* One special case: SIOCGIFCONF takes an ifconf argument
4536	   and requires a shared lock, because it sleeps writing
4537 to user space.
4538 */
4539
4540 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004541 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004542 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004543 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544 return ret;
4545 }
4546 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004547 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004548
4549 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4550 return -EFAULT;
4551
4552 ifr.ifr_name[IFNAMSIZ-1] = 0;
4553
4554 colon = strchr(ifr.ifr_name, ':');
4555 if (colon)
4556 *colon = 0;
4557
4558 /*
4559 * See which interface the caller is talking about.
4560 */
4561
4562 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004563 /*
4564 * These ioctl calls:
4565 * - can be done by all.
4566 * - atomic and do not require locking.
4567 * - return a value
4568 */
4569 case SIOCGIFFLAGS:
4570 case SIOCGIFMETRIC:
4571 case SIOCGIFMTU:
4572 case SIOCGIFHWADDR:
4573 case SIOCGIFSLAVE:
4574 case SIOCGIFMAP:
4575 case SIOCGIFINDEX:
4576 case SIOCGIFTXQLEN:
4577 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004578 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004579 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004580 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004581 if (!ret) {
4582 if (colon)
4583 *colon = ':';
4584 if (copy_to_user(arg, &ifr,
4585 sizeof(struct ifreq)))
4586 ret = -EFAULT;
4587 }
4588 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004589
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004590 case SIOCETHTOOL:
4591 dev_load(net, ifr.ifr_name);
4592 rtnl_lock();
4593 ret = dev_ethtool(net, &ifr);
4594 rtnl_unlock();
4595 if (!ret) {
4596 if (colon)
4597 *colon = ':';
4598 if (copy_to_user(arg, &ifr,
4599 sizeof(struct ifreq)))
4600 ret = -EFAULT;
4601 }
4602 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004604 /*
4605 * These ioctl calls:
4606 * - require superuser power.
4607 * - require strict serialization.
4608 * - return a value
4609 */
4610 case SIOCGMIIPHY:
4611 case SIOCGMIIREG:
4612 case SIOCSIFNAME:
4613 if (!capable(CAP_NET_ADMIN))
4614 return -EPERM;
4615 dev_load(net, ifr.ifr_name);
4616 rtnl_lock();
4617 ret = dev_ifsioc(net, &ifr, cmd);
4618 rtnl_unlock();
4619 if (!ret) {
4620 if (colon)
4621 *colon = ':';
4622 if (copy_to_user(arg, &ifr,
4623 sizeof(struct ifreq)))
4624 ret = -EFAULT;
4625 }
4626 return ret;
4627
4628 /*
4629 * These ioctl calls:
4630 * - require superuser power.
4631 * - require strict serialization.
4632 * - do not return a value
4633 */
4634 case SIOCSIFFLAGS:
4635 case SIOCSIFMETRIC:
4636 case SIOCSIFMTU:
4637 case SIOCSIFMAP:
4638 case SIOCSIFHWADDR:
4639 case SIOCSIFSLAVE:
4640 case SIOCADDMULTI:
4641 case SIOCDELMULTI:
4642 case SIOCSIFHWBROADCAST:
4643 case SIOCSIFTXQLEN:
4644 case SIOCSMIIREG:
4645 case SIOCBONDENSLAVE:
4646 case SIOCBONDRELEASE:
4647 case SIOCBONDSETHWADDR:
4648 case SIOCBONDCHANGEACTIVE:
4649 case SIOCBRADDIF:
4650 case SIOCBRDELIF:
4651 case SIOCSHWTSTAMP:
4652 if (!capable(CAP_NET_ADMIN))
4653 return -EPERM;
4654 /* fall through */
4655 case SIOCBONDSLAVEINFOQUERY:
4656 case SIOCBONDINFOQUERY:
4657 dev_load(net, ifr.ifr_name);
4658 rtnl_lock();
4659 ret = dev_ifsioc(net, &ifr, cmd);
4660 rtnl_unlock();
4661 return ret;
4662
4663 case SIOCGIFMEM:
4664 /* Get the per device memory space. We can add this but
4665 * currently do not support it */
4666 case SIOCSIFMEM:
4667 /* Set the per device memory buffer space.
4668 * Not applicable in our case */
4669 case SIOCSIFLINK:
4670 return -EINVAL;
4671
4672 /*
4673 * Unknown or private ioctl.
4674 */
4675 default:
4676 if (cmd == SIOCWANDEV ||
4677 (cmd >= SIOCDEVPRIVATE &&
4678 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004679 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004680 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004681 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004683 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004684 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004685 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004686 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004687 }
4688 /* Take care of Wireless Extensions */
4689 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4690 return wext_handle_ioctl(net, &ifr, cmd, arg);
4691 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004692 }
4693}
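/*
 * Userspace view (illustrative sketch, not part of dev.c): the SIOCxIFxxx
 * requests dispatched above can be issued on any socket fd, e.g.:
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu = %d\n", ifr.ifr_mtu);
 *
 * SIOCGIFMTU lands in dev_ifsioc_locked() under rcu_read_lock(), while a
 * set such as SIOCSIFMTU requires CAP_NET_ADMIN and runs in dev_ifsioc()
 * under rtnl_lock().
 */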
4694
4695
4696/**
4697 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004698 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699 *
4700 * Returns a suitable unique value for a new device interface
4701 * number. The caller must hold the rtnl semaphore or the
4702 * dev_base_lock to be sure it remains unique.
4703 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004704static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004705{
4706 static int ifindex;
4707 for (;;) {
4708 if (++ifindex <= 0)
4709 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004710 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004711 return ifindex;
4712 }
4713}
4714
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004716static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004717
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004718static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004720 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004721}
4722
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004723static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004724{
Octavian Purdila395264d2009-11-16 13:49:35 +00004725 struct net_device *dev, *aux, *fdev;
4726 LIST_HEAD(pernet_list);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004727
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004728 BUG_ON(dev_boot_phase);
4729 ASSERT_RTNL();
4730
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004731 list_for_each_entry(dev, head, unreg_list) {
4732		/* Some devices call unregister without having
4733		 * registered, to unwind failed initialization.
4734		 */
4735 if (dev->reg_state == NETREG_UNINITIALIZED) {
4736 pr_debug("unregister_netdevice: device %s/%p never "
4737 "was registered\n", dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004738
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004739 WARN_ON(1);
4740 return;
4741 }
4742
4743 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4744
4745 /* If device is running, close it first. */
4746 dev_close(dev);
4747
4748 /* And unlink it from device chain. */
4749 unlist_netdevice(dev);
4750
4751 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004752 }
4753
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004754 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004755
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004756 list_for_each_entry(dev, head, unreg_list) {
4757 /* Shutdown queueing discipline. */
4758 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004759
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004760
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004761		/* Notify protocols that we are about to destroy
4762 this device. They should clean all the things.
4763 */
4764 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4765
4766 /*
4767 * Flush the unicast and multicast chains
4768 */
4769 dev_unicast_flush(dev);
4770 dev_addr_discard(dev);
4771
4772 if (dev->netdev_ops->ndo_uninit)
4773 dev->netdev_ops->ndo_uninit(dev);
4774
4775 /* Notifier chain MUST detach us from master device. */
4776 WARN_ON(dev->master);
4777
4778 /* Remove entries from kobject tree */
4779 netdev_unregister_kobject(dev);
4780 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004781
4782 synchronize_net();
4783
Octavian Purdila395264d2009-11-16 13:49:35 +00004784 list_for_each_entry_safe(dev, aux, head, unreg_list) {
4785 int new_net = 1;
4786 list_for_each_entry(fdev, &pernet_list, unreg_list) {
4787 if (dev_net(dev) == dev_net(fdev)) {
4788 new_net = 0;
4789 dev_put(dev);
4790 break;
4791 }
4792 }
4793 if (new_net)
4794 list_move(&dev->unreg_list, &pernet_list);
4795 }
4796
4797 list_for_each_entry_safe(dev, aux, &pernet_list, unreg_list) {
4798 call_netdevice_notifiers(NETDEV_UNREGISTER_PERNET, dev);
4799 list_move(&dev->unreg_list, head);
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004800 dev_put(dev);
Octavian Purdila395264d2009-11-16 13:49:35 +00004801 }
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004802}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004803
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004804static void rollback_registered(struct net_device *dev)
4805{
4806 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004807
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004808 list_add(&dev->unreg_list, &single);
4809 rollback_registered_many(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004810}
4811
David S. Millere8a04642008-07-17 00:34:19 -07004812static void __netdev_init_queue_locks_one(struct net_device *dev,
4813 struct netdev_queue *dev_queue,
4814 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004815{
4816 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004817 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004818 dev_queue->xmit_lock_owner = -1;
4819}
4820
4821static void netdev_init_queue_locks(struct net_device *dev)
4822{
David S. Millere8a04642008-07-17 00:34:19 -07004823 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4824 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004825}
4826
Herbert Xub63365a2008-10-23 01:11:29 -07004827unsigned long netdev_fix_features(unsigned long features, const char *name)
4828{
4829 /* Fix illegal SG+CSUM combinations. */
4830 if ((features & NETIF_F_SG) &&
4831 !(features & NETIF_F_ALL_CSUM)) {
4832 if (name)
4833 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4834 "checksum feature.\n", name);
4835 features &= ~NETIF_F_SG;
4836 }
4837
4838 /* TSO requires that SG is present as well. */
4839 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4840 if (name)
4841 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4842 "SG feature.\n", name);
4843 features &= ~NETIF_F_TSO;
4844 }
4845
4846 if (features & NETIF_F_UFO) {
4847 if (!(features & NETIF_F_GEN_CSUM)) {
4848 if (name)
4849 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4850 "since no NETIF_F_HW_CSUM feature.\n",
4851 name);
4852 features &= ~NETIF_F_UFO;
4853 }
4854
4855 if (!(features & NETIF_F_SG)) {
4856 if (name)
4857 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4858 "since no NETIF_F_SG feature.\n", name);
4859 features &= ~NETIF_F_UFO;
4860 }
4861 }
4862
4863 return features;
4864}
4865EXPORT_SYMBOL(netdev_fix_features);
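/*
 * Usage sketch (illustrative only, not part of dev.c): a hypothetical
 * driver sanitizing a requested feature set before applying it, so that
 * illegal combinations such as TSO without SG are dropped with the
 * notices printed above.
 */
static void example_apply_features(struct net_device *dev, unsigned long wanted)
{
	dev->features = netdev_fix_features(wanted, dev->name);
}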
4866
Linus Torvalds1da177e2005-04-16 15:20:36 -07004867/**
4868 * register_netdevice - register a network device
4869 * @dev: device to register
4870 *
4871 * Take a completed network device structure and add it to the kernel
4872 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4873 * chain. 0 is returned on success. A negative errno code is returned
4874 * on a failure to set up the device, or if the name is a duplicate.
4875 *
4876 * Callers must hold the rtnl semaphore. You may want
4877 * register_netdev() instead of this.
4878 *
4879 * BUGS:
4880 * The locking appears insufficient to guarantee two parallel registers
4881 * will not get the same name.
4882 */
4883
4884int register_netdevice(struct net_device *dev)
4885{
4886 struct hlist_head *head;
4887 struct hlist_node *p;
4888 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004889 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004890
4891 BUG_ON(dev_boot_phase);
4892 ASSERT_RTNL();
4893
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004894 might_sleep();
4895
Linus Torvalds1da177e2005-04-16 15:20:36 -07004896 /* When net_device's are persistent, this will be fatal. */
4897 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004898 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004899
David S. Millerf1f28aa2008-07-15 00:08:33 -07004900 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004901 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004902 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903
Linus Torvalds1da177e2005-04-16 15:20:36 -07004904 dev->iflink = -1;
4905
4906 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004907 if (dev->netdev_ops->ndo_init) {
4908 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004909 if (ret) {
4910 if (ret > 0)
4911 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004912 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004913 }
4914 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004915
Linus Torvalds1da177e2005-04-16 15:20:36 -07004916 if (!dev_valid_name(dev->name)) {
4917 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004918 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004919 }
4920
Eric W. Biederman881d9662007-09-17 11:56:21 -07004921 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004922 if (dev->iflink == -1)
4923 dev->iflink = dev->ifindex;
4924
4925 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004926 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927 hlist_for_each(p, head) {
4928 struct net_device *d
4929 = hlist_entry(p, struct net_device, name_hlist);
4930 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4931 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004932 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004933 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004934 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004935
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004936 /* Fix illegal checksum combinations */
4937 if ((dev->features & NETIF_F_HW_CSUM) &&
4938 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4939 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4940 dev->name);
4941 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4942 }
4943
4944 if ((dev->features & NETIF_F_NO_CSUM) &&
4945 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4946 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4947 dev->name);
4948 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4949 }
4950
Herbert Xub63365a2008-10-23 01:11:29 -07004951 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004952
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004953 /* Enable software GSO if SG is supported. */
4954 if (dev->features & NETIF_F_SG)
4955 dev->features |= NETIF_F_GSO;
4956
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004957 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00004958
4959 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4960 ret = notifier_to_errno(ret);
4961 if (ret)
4962 goto err_uninit;
4963
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004964 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004965 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004966 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004967 dev->reg_state = NETREG_REGISTERED;
4968
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969 /*
4970 * Default initial state at registry is that the
4971 * device is present.
4972 */
4973
4974 set_bit(__LINK_STATE_PRESENT, &dev->state);
4975
Linus Torvalds1da177e2005-04-16 15:20:36 -07004976 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004977 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004978 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004979
4980 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004981 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004982 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004983 if (ret) {
4984 rollback_registered(dev);
4985 dev->reg_state = NETREG_UNREGISTERED;
4986 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987
4988out:
4989 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004990
4991err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004992 if (dev->netdev_ops->ndo_uninit)
4993 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004994 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004995}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004996EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004997
4998/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004999 * init_dummy_netdev - init a dummy network device for NAPI
5000 * @dev: device to init
5001 *
5002 * This takes a network device structure and initializes the minimum
5003 * number of fields so it can be used to schedule NAPI polls without
5004 * registering a full blown interface. This is to be used by drivers
5005 * that need to tie several hardware interfaces to a single NAPI
5006 * poll scheduler due to HW limitations.
5007 */
5008int init_dummy_netdev(struct net_device *dev)
5009{
5010 /* Clear everything. Note we don't initialize spinlocks
5011	 * as they aren't supposed to be taken by any of the
5012 * NAPI code and this dummy netdev is supposed to be
5013 * only ever used for NAPI polls
5014 */
5015 memset(dev, 0, sizeof(struct net_device));
5016
5017 /* make sure we BUG if trying to hit standard
5018 * register/unregister code path
5019 */
5020 dev->reg_state = NETREG_DUMMY;
5021
5022 /* initialize the ref count */
5023 atomic_set(&dev->refcnt, 1);
5024
5025 /* NAPI wants this */
5026 INIT_LIST_HEAD(&dev->napi_list);
5027
5028 /* a dummy interface is started by default */
5029 set_bit(__LINK_STATE_PRESENT, &dev->state);
5030 set_bit(__LINK_STATE_START, &dev->state);
5031
5032 return 0;
5033}
5034EXPORT_SYMBOL_GPL(init_dummy_netdev);
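/*
 * Usage sketch (illustrative only, not part of dev.c): a driver with one
 * netdev but several DMA engines can hang extra NAPI contexts off dummy
 * netdevs, as the kernel-doc above describes. struct example_engine and
 * example_poll() are hypothetical.
 */
struct example_engine {
	struct net_device dummy;	/* never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	/* ... service this engine's ring here, then ... */
	napi_complete(napi);
	return 0;
}

static void example_engine_setup(struct example_engine *eng)
{
	init_dummy_netdev(&eng->dummy);
	netif_napi_add(&eng->dummy, &eng->napi, example_poll, 64);
}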
5035
5036
5037/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005038 * register_netdev - register a network device
5039 * @dev: device to register
5040 *
5041 * Take a completed network device structure and add it to the kernel
5042 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5043 * chain. 0 is returned on success. A negative errno code is returned
5044 * on a failure to set up the device, or if the name is a duplicate.
5045 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005046 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047 * and expands the device name if you passed a format string to
5048 * alloc_netdev.
5049 */
5050int register_netdev(struct net_device *dev)
5051{
5052 int err;
5053
5054 rtnl_lock();
5055
5056 /*
5057 * If the name is a format string the caller wants us to do a
5058 * name allocation.
5059 */
5060 if (strchr(dev->name, '%')) {
5061 err = dev_alloc_name(dev, dev->name);
5062 if (err < 0)
5063 goto out;
5064 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005065
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 err = register_netdevice(dev);
5067out:
5068 rtnl_unlock();
5069 return err;
5070}
5071EXPORT_SYMBOL(register_netdev);
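/*
 * Usage sketch (illustrative only, not part of dev.c): the common
 * allocate-and-register pattern. Passing a "%d" format string lets
 * register_netdev() pick the first free name, as described above.
 */
static struct net_device *example_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev(0, "example%d", ether_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {	/* takes and drops the RTNL lock */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}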
5072
5073/*
5074 * netdev_wait_allrefs - wait until all references are gone.
5075 *
5076 * This is called when unregistering network devices.
5077 *
5078 * Any protocol or device that holds a reference should register
5079 * for netdevice notification, and cleanup and put back the
5080 * reference if they receive an UNREGISTER event.
5081 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005082 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005083 */
5084static void netdev_wait_allrefs(struct net_device *dev)
5085{
5086 unsigned long rebroadcast_time, warning_time;
5087
Eric Dumazete014deb2009-11-17 05:59:21 +00005088 linkwatch_forget_dev(dev);
5089
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090 rebroadcast_time = warning_time = jiffies;
5091 while (atomic_read(&dev->refcnt) != 0) {
5092 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005093 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005094
5095 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005096 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Octavian Purdila395264d2009-11-16 13:49:35 +00005097 /* don't resend NETDEV_UNREGISTER_PERNET, _PERNET users
5098			 * should have already handled it the first time */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005099
5100 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5101 &dev->state)) {
5102 /* We must not have linkwatch events
5103 * pending on unregister. If this
5104 * happens, we simply run the queue
5105 * unscheduled, resulting in a noop
5106 * for this device.
5107 */
5108 linkwatch_run_queue();
5109 }
5110
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005111 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112
5113 rebroadcast_time = jiffies;
5114 }
5115
5116 msleep(250);
5117
5118 if (time_after(jiffies, warning_time + 10 * HZ)) {
5119 printk(KERN_EMERG "unregister_netdevice: "
5120 "waiting for %s to become free. Usage "
5121 "count = %d\n",
5122 dev->name, atomic_read(&dev->refcnt));
5123 warning_time = jiffies;
5124 }
5125 }
5126}
5127
5128/* The sequence is:
5129 *
5130 * rtnl_lock();
5131 * ...
5132 * register_netdevice(x1);
5133 * register_netdevice(x2);
5134 * ...
5135 * unregister_netdevice(y1);
5136 * unregister_netdevice(y2);
5137 * ...
5138 * rtnl_unlock();
5139 * free_netdev(y1);
5140 * free_netdev(y2);
5141 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005142 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005143 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005144 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145 * without deadlocking with linkwatch via keventd.
5146 * 2) Since we run with the RTNL semaphore not held, we can sleep
5147 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005148 *
5149 * We must not return until all unregister events added during
5150 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005152void netdev_run_todo(void)
5153{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005154 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005155
Linus Torvalds1da177e2005-04-16 15:20:36 -07005156 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005157 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005158
5159 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005160
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161 while (!list_empty(&list)) {
5162 struct net_device *dev
5163 = list_entry(list.next, struct net_device, todo_list);
5164 list_del(&dev->todo_list);
5165
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005166 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167 printk(KERN_ERR "network todo '%s' but state %d\n",
5168 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005169 dump_stack();
5170 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005172
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005173 dev->reg_state = NETREG_UNREGISTERED;
5174
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005175 on_each_cpu(flush_backlog, dev, 1);
5176
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005177 netdev_wait_allrefs(dev);
5178
5179 /* paranoia */
5180 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005181 WARN_ON(dev->ip_ptr);
5182 WARN_ON(dev->ip6_ptr);
5183 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005184
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005185 if (dev->destructor)
5186 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005187
5188 /* Free network device */
5189 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191}
5192
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005193/**
Eric Dumazetd83345a2009-11-16 03:36:51 +00005194 * dev_txq_stats_fold - fold tx_queues stats
5195 * @dev: device to get statistics from
5196 * @stats: struct net_device_stats to hold results
5197 */
5198void dev_txq_stats_fold(const struct net_device *dev,
5199 struct net_device_stats *stats)
5200{
5201 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5202 unsigned int i;
5203 struct netdev_queue *txq;
5204
5205 for (i = 0; i < dev->num_tx_queues; i++) {
5206 txq = netdev_get_tx_queue(dev, i);
5207 tx_bytes += txq->tx_bytes;
5208 tx_packets += txq->tx_packets;
5209 tx_dropped += txq->tx_dropped;
5210 }
5211 if (tx_bytes || tx_packets || tx_dropped) {
5212 stats->tx_bytes = tx_bytes;
5213 stats->tx_packets = tx_packets;
5214 stats->tx_dropped = tx_dropped;
5215 }
5216}
5217EXPORT_SYMBOL(dev_txq_stats_fold);
5218
5219/**
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005220 * dev_get_stats - get network device statistics
5221 * @dev: device to get statistics from
5222 *
5223 * Get network statistics from device. The device driver may provide
5224 * its own method by setting dev->netdev_ops->get_stats; otherwise
5225 * the internal statistics structure is used.
5226 */
5227const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005228{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005229 const struct net_device_ops *ops = dev->netdev_ops;
5230
5231 if (ops->ndo_get_stats)
5232 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005233
Eric Dumazetd83345a2009-11-16 03:36:51 +00005234 dev_txq_stats_fold(dev, &dev->stats);
5235 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07005236}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005237EXPORT_SYMBOL(dev_get_stats);
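/*
 * Usage sketch (illustrative only, not part of dev.c): a driver that
 * counts TX traffic per queue can implement ndo_get_stats by folding the
 * queue counters into dev->stats, exactly as the default path above does.
 */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	/* RX counters would be read back from hardware here (hypothetical) */
	dev_txq_stats_fold(dev, &dev->stats);
	return &dev->stats;
}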
Rusty Russellc45d2862007-03-28 14:29:08 -07005238
David S. Millerdc2b4842008-07-08 17:18:23 -07005239static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005240 struct netdev_queue *queue,
5241 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005242{
David S. Millerdc2b4842008-07-08 17:18:23 -07005243 queue->dev = dev;
5244}
5245
David S. Millerbb949fb2008-07-08 16:55:56 -07005246static void netdev_init_queues(struct net_device *dev)
5247{
David S. Millere8a04642008-07-17 00:34:19 -07005248 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5249 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005250 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005251}
5252
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005254 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005255 * @sizeof_priv: size of private data to allocate space for
5256 * @name: device name format string
5257 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005258 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259 *
5260 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005261 * and performs basic initialization. Also allocates subqueue structs
5262 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005264struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5265 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005266{
David S. Millere8a04642008-07-17 00:34:19 -07005267 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005268 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005269 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005270 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005272 BUG_ON(strlen(name) >= sizeof(dev->name));
5273
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005274 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005275 if (sizeof_priv) {
5276 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005277 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005278 alloc_size += sizeof_priv;
5279 }
5280 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005281 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005283 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005284 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005285 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005286 return NULL;
5287 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005288
Stephen Hemminger79439862008-07-21 13:28:44 -07005289 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005290 if (!tx) {
5291 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5292 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005293 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005294 }
5295
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005296 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005297 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005298
5299 if (dev_addr_init(dev))
5300 goto free_tx;
5301
Jiri Pirkoccffad252009-05-22 23:22:17 +00005302 dev_unicast_init(dev);
5303
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005304 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305
David S. Millere8a04642008-07-17 00:34:19 -07005306 dev->_tx = tx;
5307 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005308 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005309
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005310 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005311
David S. Millerbb949fb2008-07-08 16:55:56 -07005312 netdev_init_queues(dev);
5313
Herbert Xud565b0a2008-12-15 23:38:52 -08005314 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005315 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazete014deb2009-11-17 05:59:21 +00005316 INIT_LIST_HEAD(&dev->link_watch_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005317 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005318 setup(dev);
5319 strcpy(dev->name, name);
5320 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005321
5322free_tx:
5323 kfree(tx);
5324
5325free_p:
5326 kfree(p);
5327 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005328}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005329EXPORT_SYMBOL(alloc_netdev_mq);
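/*
 * Usage sketch (illustrative only, not part of dev.c): allocating a
 * multiqueue device with driver-private data. netdev_priv() returns the
 * private area reserved after the aligned struct net_device. The names
 * below are hypothetical.
 */
struct example_mq_priv {
	spinlock_t lock;
	int id;
};

static struct net_device *example_create_mq(void (*setup)(struct net_device *))
{
	struct net_device *dev;
	struct example_mq_priv *priv;

	dev = alloc_netdev_mq(sizeof(struct example_mq_priv), "emq%d",
			      setup, 8);	/* 8 TX queues */
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	spin_lock_init(&priv->lock);
	return dev;
}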
Linus Torvalds1da177e2005-04-16 15:20:36 -07005330
5331/**
5332 * free_netdev - free network device
5333 * @dev: device
5334 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005335 * This function does the last stage of destroying an allocated device
5336 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005337 * If this is the last reference then it will be freed.
5338 */
5339void free_netdev(struct net_device *dev)
5340{
Herbert Xud565b0a2008-12-15 23:38:52 -08005341 struct napi_struct *p, *n;
5342
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005343 release_net(dev_net(dev));
5344
David S. Millere8a04642008-07-17 00:34:19 -07005345 kfree(dev->_tx);
5346
Jiri Pirkof001fde2009-05-05 02:48:28 +00005347 /* Flush device addresses */
5348 dev_addr_flush(dev);
5349
Herbert Xud565b0a2008-12-15 23:38:52 -08005350 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5351 netif_napi_del(p);
5352
Stephen Hemminger3041a062006-05-26 13:25:24 -07005353 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005354 if (dev->reg_state == NETREG_UNINITIALIZED) {
5355 kfree((char *)dev - dev->padded);
5356 return;
5357 }
5358
5359 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5360 dev->reg_state = NETREG_RELEASED;
5361
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005362 /* will free via device release */
5363 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005364}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005365EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005366
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005367/**
5368 * synchronize_net - Synchronize with packet receive processing
5369 *
5370 * Wait for packets currently being received to be done.
5371 * Does not block later packets from starting.
5372 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005373void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005374{
5375 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005376 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005377}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005378EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005379
5380/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005381 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005382 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005383 * @head: list
5384 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005385 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005386 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005387 * If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005388 *
5389 * Callers must hold the rtnl semaphore. You may want
5390 * unregister_netdev() instead of this.
5391 */
5392
Eric Dumazet44a08732009-10-27 07:03:04 +00005393void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005394{
Herbert Xua6620712007-12-12 19:21:56 -08005395 ASSERT_RTNL();
5396
Eric Dumazet44a08732009-10-27 07:03:04 +00005397 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005398 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005399 } else {
5400 rollback_registered(dev);
5401 /* Finish processing unregister after unlock */
5402 net_set_todo(dev);
5403 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005404}
Eric Dumazet44a08732009-10-27 07:03:04 +00005405EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005406
5407/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005408 * unregister_netdevice_many - unregister many devices
5409 * @head: list of devices
5410 *
Octavian Purdila395264d2009-11-16 13:49:35 +00005411 * WARNING: Calling this modifies the given list
5412 * (in rollback_registered_many). It may change the order of the elements
5413 * in the list. However, you can assume it does not add or delete elements
5414 * to/from the list.
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005415 */
5416void unregister_netdevice_many(struct list_head *head)
5417{
5418 struct net_device *dev;
5419
5420 if (!list_empty(head)) {
5421 rollback_registered_many(head);
5422 list_for_each_entry(dev, head, unreg_list)
5423 net_set_todo(dev);
5424 }
5425}
Eric Dumazet63c80992009-10-27 07:06:49 +00005426EXPORT_SYMBOL(unregister_netdevice_many);
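/*
 * Usage sketch (illustrative only, not part of dev.c): batching
 * unregistration so the synchronize_net() calls in
 * rollback_registered_many() are paid once per batch rather than once
 * per device. Must run under the RTNL lock; the device array is
 * hypothetical.
 */
static void example_destroy_all(struct net_device *devs[], int n)
{
	LIST_HEAD(unreg_head);
	int i;

	ASSERT_RTNL();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &unreg_head);
	unregister_netdevice_many(&unreg_head);
}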

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice() that takes
 * the rtnl semaphore.  In general you want to use this and not
 * unregister_netdevice().
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
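
/*
 * Example (editor's sketch, not part of the original source): the usual
 * module-exit pairing for a driver that registered one device with
 * register_netdev(). my_dev and my_driver_exit() are hypothetical.
 */
#if 0	/* illustrative sketch only */
static struct net_device *my_dev;	/* allocated and registered at init */

static void __exit my_driver_exit(void)
{
	unregister_netdev(my_dev);	/* takes and drops the rtnl lock */
	free_netdev(my_dev);		/* safe once unregistration is done */
}
#endif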

/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL, name pattern to try if the current device name
 *	is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	char buf[IFNAMSIZ];
	const char *destname;
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

#ifdef CONFIG_SYSFS
	/* Don't allow real devices to be moved when sysfs
	 * is enabled.
	 */
	err = -EINVAL;
	if (dev->dev.parent)
		goto out;
#endif

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	destname = dev->name;
	if (__dev_get_by_name(net, destname)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (!dev_valid_name(pat))
			goto out;
		if (strchr(pat, '%')) {
			if (__dev_alloc_name(net, pat, buf) < 0)
				goto out;
			destname = buf;
		} else
			destname = pat;
		if (__dev_get_by_name(net, destname))
			goto out;
	}

	/*
	 * And now run a mini version of register_netdevice()
	 * and unregister_netdevice().
	 */

	/* If device is running, close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	 * this device, so they can clean up their state.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_PERNET, dev);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_unicast_flush(dev);
	dev_addr_discard(dev);

	netdev_unregister_kobject(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* Assign the new device name */
	if (destname != dev->name)
		strcpy(dev->name, destname);

	/* If there is an ifindex conflict, assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = netdev_register_kobject(dev);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
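
/*
 * Example (editor's sketch, not part of the original source): moving a
 * device into another namespace, falling back to an "eth%d" pattern if
 * its current name is taken there. my_move_dev() and get_target_net()
 * (assumed to return a held struct net) are hypothetical.
 */
#if 0	/* illustrative sketch only */
static int my_move_dev(struct net_device *dev)
{
	struct net *net = get_target_net();	/* hypothetical helper */
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();

	put_net(net);
	return err;
}
#endif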

/*
 * CPU hotplug callback: when a CPU goes offline, splice its pending
 * softnet work (completion queue, qdisc output queue and input backlog)
 * onto the current CPU's queues so no packet or pending free is lost.
 */
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct Qdisc **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}


/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
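
/*
 * Example (editor's sketch, not part of the original source): how a
 * master device (a bond-like aggregate) could fold each slave's feature
 * set into its own with this helper. my_master_compute_features() and
 * MY_MASTER_FEATURES (the mask of features the master is willing to
 * expose) are hypothetical.
 */
#if 0	/* illustrative sketch only */
static void my_master_compute_features(struct net_device *master,
				       struct net_device **slave, int n)
{
	unsigned long features = master->features;
	int i;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     slave[i]->features,
						     MY_MASTER_FEATURES);
	master->features = features;
}
#endif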

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 * @buffer: buffer for resulting name
 * @len: size of buffer
 *
 * Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
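
/*
 * Example (editor's sketch, not part of the original source): this helper
 * is mainly useful in diagnostics; the netdev watchdog in
 * net/sched/sch_generic.c reports transmit timeouts this way.
 * my_report_stall() and its message text are hypothetical.
 */
#if 0	/* illustrative sketch only */
static void my_report_stall(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_WARNING "%s (%s): transmit appears stalled\n",
	       dev->name,
	       netdev_drivername(dev, drivername, sizeof(drivername)));
}
#endif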

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
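
/*
 * Example (editor's sketch, not part of the original source): the
 * pernet_operations pattern above is the generic way to keep state per
 * network namespace; .init runs as each namespace is created and .exit
 * as it is torn down. my_net_ops and its hooks are hypothetical.
 */
#if 0	/* illustrative sketch only */
static int __net_init my_net_init(struct net *net)
{
	/* allocate this subsystem's per-namespace state here */
	return 0;
}

static void __net_exit my_net_exit(struct net *net)
{
	/* free the per-namespace state here */
}

static struct pernet_operations my_net_ops = {
	.init = my_net_init,
	.exit = my_net_exit,
};

/* registered once, typically from the subsystem's init code:
 *	err = register_pernet_subsys(&my_net_ops);
 */
#endif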

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
restart:
	for_each_netdev(net, dev) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Delete virtual devices */
		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
			dev->rtnl_link_ops->dellink(dev, NULL);
			goto restart;
		}

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
		goto restart;
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		/* The backlog is a pseudo-NAPI context used to process
		 * packets queued on this CPU by netif_rx().
		 */
		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
		queue->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device first on the list of network
	 * devices, so that it is the first device that appears and the
	 * last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);
