/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and should be copied-on-write, so it would change the
 *	packet and subsequent readers would see a broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
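
/*
 * Example (illustrative sketch, not part of the original file): a module
 * tapping IPv4 traffic through this interface. The my_* names are
 * hypothetical; the packet_type fields and handler signature follow the
 * kernel API above.
 *
 *	static int my_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		// We were handed our own reference to the skb; release it.
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pt __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_IP),
 *		.func	= my_ipv4_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);		// module init
 *	dev_remove_pack(&my_pt);	// module exit
 */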

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if settings are found.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
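
/*
 * Example (illustrative, hypothetical values): a kernel command line of
 *
 *	netdev=5,0x300,0,0,eth0
 *
 * is parsed by get_options() above into ints[] = { 4, 5, 0x300, 0, 0 }
 * with "eth0" left in str, so eth0 is recorded with irq 5 and I/O base
 * 0x300 for netdev_boot_setup_check() to apply at probe time.
 */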

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
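
/*
 * Example (illustrative sketch, not part of the original file): typical
 * refcounted lookup; the namespace and name are hypothetical.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		// ... use dev; the held reference keeps it alive ...
 *		dev_put(dev);
 *	}
 */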

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

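/*
 * Example (illustrative sketch, not part of the original file): the
 * lockless lookup must stay inside an RCU read-side critical section;
 * take a reference before leaving it if the pointer is kept. The
 * ifindex value is hypothetical.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, 2);
 *	if (dev)
 *		dev_hold(dev);	// only needed if dev outlives the section
 *	rcu_read_unlock();
 */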

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);
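
/*
 * Examples (illustrative, hypothetical names) of what the checks above
 * accept and reject:
 *
 *	dev_valid_name("eth0")		-> 1
 *	dev_valid_name("")		-> 0	(empty)
 *	dev_valid_name("my device")	-> 0	(whitespace)
 *	dev_valid_name("net/wan")	-> 0	('/' would break sysfs paths)
 *	dev_valid_name("..")		-> 0	(reserved directory name)
 */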

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be exactly one "%d" and no other
		 * "%" characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
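
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * passing a wildcard template; "eth%d" resolves to the first free unit
 * such as "eth0". The error label is hypothetical.
 *
 *	ret = dev_alloc_name(dev, "eth%d");
 *	if (ret < 0)
 *		goto out;	// bad format string or no free unit
 *	// dev->name now holds the assigned name; ret is the unit number
 */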


/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d"
 *	can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	} else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
EXPORT_SYMBOL(dev_open);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on a different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
EXPORT_SYMBOL(dev_close);
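
/*
 * Example (illustrative sketch, not part of the original file): both
 * dev_open() and dev_close() must run under the RTNL semaphore, as the
 * ASSERT_RTNL() calls above demand.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	// ... device is up if err == 0 ...
 *	dev_close(dev);
 *	rtnl_unlock();
 */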


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the caller to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352
1353/**
1354 * unregister_netdevice_notifier - unregister a network notifier block
1355 * @nb: notifier
1356 *
1357 * Unregister a notifier previously registered by
 1358 * register_netdevice_notifier(). The notifier is unlinked from the
1359 * kernel structures and may then be reused. A negative errno code
1360 * is returned on a failure.
1361 */
1362
1363int unregister_netdevice_notifier(struct notifier_block *nb)
1364{
Herbert Xu9f514952006-03-25 01:24:25 -08001365 int err;
1366
1367 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001368 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001369 rtnl_unlock();
1370 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001372EXPORT_SYMBOL(unregister_netdevice_notifier);
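/*
 * Illustrative sketch, not part of the original file: a minimal notifier
 * block as a module would register it.  In this era the notifier's third
 * argument is the struct net_device itself.  All example_* names are
 * hypothetical.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "example: %s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/*
 * Typical pairing from module init/exit:
 *	register_netdevice_notifier(&example_netdev_nb);
 *	...
 *	unregister_netdevice_notifier(&example_netdev_nb);
 */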
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373
1374/**
1375 * call_netdevice_notifiers - call all network notifier blocks
1376 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001377 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 *
1379 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001380 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 */
1382
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001383int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001385 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386}
1387
1388/* When > 0 there are consumers of rx skb time stamps */
1389static atomic_t netstamp_needed = ATOMIC_INIT(0);
1390
1391void net_enable_timestamp(void)
1392{
1393 atomic_inc(&netstamp_needed);
1394}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001395EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396
1397void net_disable_timestamp(void)
1398{
1399 atomic_dec(&netstamp_needed);
1400}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001401EXPORT_SYMBOL(net_disable_timestamp);
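/*
 * Illustrative sketch, not part of the original file: a consumer of rx
 * timestamps (a hypothetical packet tap) pairs the two calls around its
 * lifetime so the netstamp_needed count stays balanced.
 */
static void example_tap_start(void)
{
	net_enable_timestamp();		/* take a consumer reference */
}

static void example_tap_stop(void)
{
	net_disable_timestamp();	/* drop the consumer reference */
}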
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001403static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404{
1405 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001406 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001407 else
1408 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409}
1410
1411/*
1412 * Support routine. Sends outgoing frames to any network
1413 * taps currently in use.
1414 */
1415
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001416static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417{
1418 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001419
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001420#ifdef CONFIG_NET_CLS_ACT
1421 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1422 net_timestamp(skb);
1423#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001424 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001425#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
1427 rcu_read_lock();
1428 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1429 /* Never send packets back to the socket
1430 * they originated from - MvS (miquels@drinkel.ow.org)
1431 */
1432 if ((ptype->dev == dev || !ptype->dev) &&
1433 (ptype->af_packet_priv == NULL ||
1434 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001435 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 if (!skb2)
1437 break;
1438
 1439 /* skb->nh should be correctly
 1440 set by the sender, so that the second statement is
1441 just protection against buggy protocols.
1442 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001443 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001445 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001446 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 if (net_ratelimit())
1448 printk(KERN_CRIT "protocol %04x is "
1449 "buggy, dev %s\n",
1450 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001451 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 }
1453
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001454 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001456 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 }
1458 }
1459 rcu_read_unlock();
1460}
1461
Denis Vlasenko56079432006-03-29 15:57:29 -08001462
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001463static inline void __netif_reschedule(struct Qdisc *q)
1464{
1465 struct softnet_data *sd;
1466 unsigned long flags;
1467
1468 local_irq_save(flags);
1469 sd = &__get_cpu_var(softnet_data);
1470 q->next_sched = sd->output_queue;
1471 sd->output_queue = q;
1472 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1473 local_irq_restore(flags);
1474}
1475
David S. Miller37437bb2008-07-16 02:15:04 -07001476void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001477{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001478 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1479 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001480}
1481EXPORT_SYMBOL(__netif_schedule);
1482
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001483void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001484{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001485 if (atomic_dec_and_test(&skb->users)) {
1486 struct softnet_data *sd;
1487 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001488
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001489 local_irq_save(flags);
1490 sd = &__get_cpu_var(softnet_data);
1491 skb->next = sd->completion_queue;
1492 sd->completion_queue = skb;
1493 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1494 local_irq_restore(flags);
1495 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001496}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001497EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001498
1499void dev_kfree_skb_any(struct sk_buff *skb)
1500{
1501 if (in_irq() || irqs_disabled())
1502 dev_kfree_skb_irq(skb);
1503 else
1504 dev_kfree_skb(skb);
1505}
1506EXPORT_SYMBOL(dev_kfree_skb_any);
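/*
 * Illustrative sketch, not part of the original file: a driver completion
 * handler that may run in hardirq context must not call dev_kfree_skb()
 * directly; dev_kfree_skb_any() picks the safe variant for it.
 * example_tx_complete is a hypothetical name.
 */
static void example_tx_complete(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);	/* safe in irq, softirq and process context */
}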
1507
1508
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001509/**
1510 * netif_device_detach - mark device as removed
1511 * @dev: network device
1512 *
 1513 * Mark the device as removed from the system and therefore no longer available.
1514 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001515void netif_device_detach(struct net_device *dev)
1516{
1517 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1518 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001519 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001520 }
1521}
1522EXPORT_SYMBOL(netif_device_detach);
1523
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001524/**
1525 * netif_device_attach - mark device as attached
1526 * @dev: network device
1527 *
 1528 * Mark the device as attached to the system and restart it if needed.
1529 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001530void netif_device_attach(struct net_device *dev)
1531{
1532 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1533 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001534 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001535 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001536 }
1537}
1538EXPORT_SYMBOL(netif_device_attach);
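/*
 * Illustrative sketch, not part of the original file: the usual PCI
 * suspend/resume pattern built on the two helpers above (assumes
 * <linux/pci.h>).  All example_* names are hypothetical.
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stops all tx queues if running */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_attach(dev);	/* wakes queues, restarts watchdog */
	return 0;
}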
1539
Ben Hutchings6de329e2008-06-16 17:02:28 -07001540static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1541{
1542 return ((features & NETIF_F_GEN_CSUM) ||
1543 ((features & NETIF_F_IP_CSUM) &&
1544 protocol == htons(ETH_P_IP)) ||
1545 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001546 protocol == htons(ETH_P_IPV6)) ||
1547 ((features & NETIF_F_FCOE_CRC) &&
1548 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001549}
1550
1551static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1552{
1553 if (can_checksum_protocol(dev->features, skb->protocol))
1554 return true;
1555
1556 if (skb->protocol == htons(ETH_P_8021Q)) {
1557 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1558 if (can_checksum_protocol(dev->features & dev->vlan_features,
1559 veh->h_vlan_encapsulated_proto))
1560 return true;
1561 }
1562
1563 return false;
1564}
Denis Vlasenko56079432006-03-29 15:57:29 -08001565
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566/*
1567 * Invalidate hardware checksum when packet is to be mangled, and
1568 * complete checksum manually on outgoing path.
1569 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001570int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571{
Al Virod3bc23e2006-11-14 21:24:49 -08001572 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001573 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574
Patrick McHardy84fa7932006-08-29 16:44:56 -07001575 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001576 goto out_set_summed;
1577
1578 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001579 /* Let GSO fix up the checksum. */
1580 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 }
1582
Herbert Xua0308472007-10-15 01:47:15 -07001583 offset = skb->csum_start - skb_headroom(skb);
1584 BUG_ON(offset >= skb_headlen(skb));
1585 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1586
1587 offset += skb->csum_offset;
1588 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1589
1590 if (skb_cloned(skb) &&
1591 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1593 if (ret)
1594 goto out;
1595 }
1596
Herbert Xua0308472007-10-15 01:47:15 -07001597 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001598out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001600out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 return ret;
1602}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001603EXPORT_SYMBOL(skb_checksum_help);
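/*
 * Illustrative sketch, not part of the original file: code that is about
 * to mangle payload bytes of a CHECKSUM_PARTIAL skb resolves the checksum
 * in software first.  example_force_sw_csum is a hypothetical name.
 */
static int example_force_sw_csum(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		return skb_checksum_help(skb);
	return 0;
}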
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001605/**
1606 * skb_gso_segment - Perform segmentation on skb.
1607 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001608 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001609 *
1610 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001611 *
1612 * It may return NULL if the skb requires no segmentation. This is
1613 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001614 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001615struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001616{
1617 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1618 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001619 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001620 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001621
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001622 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001623 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001624 __skb_pull(skb, skb->mac_len);
1625
Herbert Xu67fd1a72009-01-19 16:26:44 -08001626 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1627 struct net_device *dev = skb->dev;
1628 struct ethtool_drvinfo info = {};
1629
1630 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1631 dev->ethtool_ops->get_drvinfo(dev, &info);
1632
1633 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1634 "ip_summed=%d",
1635 info.driver, dev ? dev->features : 0L,
1636 skb->sk ? skb->sk->sk_route_caps : 0L,
1637 skb->len, skb->data_len, skb->ip_summed);
1638
Herbert Xua430a432006-07-08 13:34:56 -07001639 if (skb_header_cloned(skb) &&
1640 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1641 return ERR_PTR(err);
1642 }
1643
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001644 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001645 list_for_each_entry_rcu(ptype,
1646 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001647 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001648 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001649 err = ptype->gso_send_check(skb);
1650 segs = ERR_PTR(err);
1651 if (err || skb_gso_ok(skb, features))
1652 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001653 __skb_push(skb, (skb->data -
1654 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001655 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001656 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001657 break;
1658 }
1659 }
1660 rcu_read_unlock();
1661
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001662 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001663
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001664 return segs;
1665}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001666EXPORT_SYMBOL(skb_gso_segment);
1667
Herbert Xufb286bb2005-11-10 13:01:24 -08001668/* Take action when hardware reception checksum errors are detected. */
1669#ifdef CONFIG_BUG
1670void netdev_rx_csum_fault(struct net_device *dev)
1671{
1672 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001673 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001674 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001675 dump_stack();
1676 }
1677}
1678EXPORT_SYMBOL(netdev_rx_csum_fault);
1679#endif
1680
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681/* Actually, we should eliminate this check as soon as we know that:
 1682 * 1. An IOMMU is present and allows us to map all the memory.
1683 * 2. No high memory really exists on this machine.
1684 */
1685
1686static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1687{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001688#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 int i;
1690
1691 if (dev->features & NETIF_F_HIGHDMA)
1692 return 0;
1693
1694 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1695 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1696 return 1;
1697
Herbert Xu3d3a8532006-06-27 13:33:10 -07001698#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 return 0;
1700}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001702struct dev_gso_cb {
1703 void (*destructor)(struct sk_buff *skb);
1704};
1705
1706#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1707
1708static void dev_gso_skb_destructor(struct sk_buff *skb)
1709{
1710 struct dev_gso_cb *cb;
1711
1712 do {
1713 struct sk_buff *nskb = skb->next;
1714
1715 skb->next = nskb->next;
1716 nskb->next = NULL;
1717 kfree_skb(nskb);
1718 } while (skb->next);
1719
1720 cb = DEV_GSO_CB(skb);
1721 if (cb->destructor)
1722 cb->destructor(skb);
1723}
1724
1725/**
1726 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1727 * @skb: buffer to segment
1728 *
1729 * This function segments the given skb and stores the list of segments
1730 * in skb->next.
1731 */
1732static int dev_gso_segment(struct sk_buff *skb)
1733{
1734 struct net_device *dev = skb->dev;
1735 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001736 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1737 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001738
Herbert Xu576a30e2006-06-27 13:22:38 -07001739 segs = skb_gso_segment(skb, features);
1740
1741 /* Verifying header integrity only. */
1742 if (!segs)
1743 return 0;
1744
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001745 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001746 return PTR_ERR(segs);
1747
1748 skb->next = segs;
1749 DEV_GSO_CB(skb)->destructor = skb->destructor;
1750 skb->destructor = dev_gso_skb_destructor;
1751
1752 return 0;
1753}
1754
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001755int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1756 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001757{
Stephen Hemminger00829822008-11-20 20:14:53 -08001758 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001759 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001760
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001761 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001762 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001763 dev_queue_xmit_nit(skb, dev);
1764
Herbert Xu576a30e2006-06-27 13:22:38 -07001765 if (netif_needs_gso(dev, skb)) {
1766 if (unlikely(dev_gso_segment(skb)))
1767 goto out_kfree_skb;
1768 if (skb->next)
1769 goto gso;
1770 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001771
Eric Dumazet93f154b2009-05-18 22:19:19 -07001772 /*
 1773 * If the device doesn't need skb->dst, release it right now while
 1774 * it's hot in this CPU's cache
1775 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001776 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1777 skb_dst_drop(skb);
1778
Patrick Ohlyac45f602009-02-12 05:03:37 +00001779 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001780 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001781 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001782 /*
1783 * TODO: if skb_orphan() was called by
1784 * dev->hard_start_xmit() (for example, the unmodified
1785 * igb driver does that; bnx2 doesn't), then
1786 * skb_tx_software_timestamp() will be unable to send
1787 * back the time stamp.
1788 *
1789 * How can this be prevented? Always create another
1790 * reference to the socket before calling
1791 * dev->hard_start_xmit()? Prevent that skb_orphan()
1792 * does anything in dev->hard_start_xmit() by clearing
1793 * the skb destructor before the call and restoring it
1794 * afterwards, then doing the skb_orphan() ourselves?
1795 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001796 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001797 }
1798
Herbert Xu576a30e2006-06-27 13:22:38 -07001799gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001800 do {
1801 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001802
1803 skb->next = nskb->next;
1804 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001805 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001806 if (unlikely(rc != NETDEV_TX_OK)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001807 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001808 skb->next = nskb;
1809 return rc;
1810 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001811 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001812 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001813 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001814 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001815
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001816 skb->destructor = DEV_GSO_CB(skb)->destructor;
1817
1818out_kfree_skb:
1819 kfree_skb(skb);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001820 return NETDEV_TX_OK;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001821}
1822
David S. Miller70192982009-01-27 16:34:47 -08001823static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001824
Stephen Hemminger92477442009-03-21 13:39:26 -07001825u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001826{
David S. Miller70192982009-01-27 16:34:47 -08001827 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001828
David S. Miller513de112009-05-03 14:43:10 -07001829 if (skb_rx_queue_recorded(skb)) {
1830 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001831 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001832 hash -= dev->real_num_tx_queues;
1833 return hash;
1834 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001835
1836 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001837 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001838 else
David S. Miller70192982009-01-27 16:34:47 -08001839 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001840
David S. Miller70192982009-01-27 16:34:47 -08001841 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001842
David S. Millerb6b2fed2008-07-21 09:48:06 -07001843 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001844}
Stephen Hemminger92477442009-03-21 13:39:26 -07001845EXPORT_SYMBOL(skb_tx_hash);
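/*
 * Illustrative sketch, not part of the original file: a driver with no
 * special queue-placement policy can implement ndo_select_queue as a thin
 * wrapper over skb_tx_hash().  example_select_queue is a hypothetical
 * name.
 */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);
}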
David S. Miller8f0f2222008-07-15 03:47:03 -07001846
David S. Millere8a04642008-07-17 00:34:19 -07001847static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1848 struct sk_buff *skb)
1849{
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001850 u16 queue_index;
1851 struct sock *sk = skb->sk;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001852
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001853 if (sk_tx_queue_recorded(sk)) {
1854 queue_index = sk_tx_queue_get(sk);
1855 } else {
1856 const struct net_device_ops *ops = dev->netdev_ops;
1857
1858 if (ops->ndo_select_queue) {
1859 queue_index = ops->ndo_select_queue(dev, skb);
1860 } else {
1861 queue_index = 0;
1862 if (dev->real_num_tx_queues > 1)
1863 queue_index = skb_tx_hash(dev, skb);
1864
1865 if (sk && sk->sk_dst_cache)
1866 sk_tx_queue_set(sk, queue_index);
1867 }
1868 }
David S. Millereae792b2008-07-15 03:03:33 -07001869
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001870 skb_set_queue_mapping(skb, queue_index);
1871 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001872}
1873
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001874static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1875 struct net_device *dev,
1876 struct netdev_queue *txq)
1877{
1878 spinlock_t *root_lock = qdisc_lock(q);
1879 int rc;
1880
1881 spin_lock(root_lock);
1882 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1883 kfree_skb(skb);
1884 rc = NET_XMIT_DROP;
1885 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1886 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1887 /*
1888 * This is a work-conserving queue; there are no old skbs
1889 * waiting to be sent out; and the qdisc is not running -
1890 * xmit the skb directly.
1891 */
1892 __qdisc_update_bstats(q, skb->len);
1893 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1894 __qdisc_run(q);
1895 else
1896 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1897
1898 rc = NET_XMIT_SUCCESS;
1899 } else {
1900 rc = qdisc_enqueue_root(skb, q);
1901 qdisc_run(q);
1902 }
1903 spin_unlock(root_lock);
1904
1905 return rc;
1906}
1907
Dave Jonesd29f7492008-07-22 14:09:06 -07001908/**
1909 * dev_queue_xmit - transmit a buffer
1910 * @skb: buffer to transmit
1911 *
1912 * Queue a buffer for transmission to a network device. The caller must
1913 * have set the device and priority and built the buffer before calling
1914 * this function. The function can be called from an interrupt.
1915 *
1916 * A negative errno code is returned on a failure. A success does not
1917 * guarantee the frame will be transmitted as it may be dropped due
1918 * to congestion or traffic shaping.
1919 *
1920 * -----------------------------------------------------------------------------------
1921 * I notice this method can also return errors from the queue disciplines,
1922 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1923 * be positive.
1924 *
1925 * Regardless of the return value, the skb is consumed, so it is currently
1926 * difficult to retry a send to this method. (You can bump the ref count
1927 * before sending to hold a reference for retry if you are careful.)
1928 *
1929 * When calling this method, interrupts MUST be enabled. This is because
1930 * the BH enable code must have IRQs enabled so that it will not deadlock.
1931 * --BLG
1932 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933int dev_queue_xmit(struct sk_buff *skb)
1934{
1935 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001936 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 struct Qdisc *q;
1938 int rc = -ENOMEM;
1939
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001940 /* GSO will handle the following emulations directly. */
1941 if (netif_needs_gso(dev, skb))
1942 goto gso;
1943
David S. Miller4cf704f2009-06-09 00:18:51 -07001944 if (skb_has_frags(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001946 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 goto out_kfree_skb;
1948
1949 /* Fragmented skb is linearized if device does not support SG,
1950 * or if at least one of fragments is in highmem and device
1951 * does not support DMA from it.
1952 */
1953 if (skb_shinfo(skb)->nr_frags &&
1954 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001955 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 goto out_kfree_skb;
1957
1958 /* If packet is not checksummed and device does not support
1959 * checksumming for this protocol, complete checksumming here.
1960 */
Herbert Xu663ead32007-04-09 11:59:07 -07001961 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1962 skb_set_transport_header(skb, skb->csum_start -
1963 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001964 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1965 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001966 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001968gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001969 /* Disable soft irqs for various locks below. Also
1970 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001972 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
David S. Millereae792b2008-07-15 03:03:33 -07001974 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001975 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001976
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001978 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979#endif
1980 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001981 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07001982 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 }
1984
1985 /* The device has no queue. Common case for software devices:
1986 loopback, all the sorts of tunnels...
1987
Herbert Xu932ff272006-06-09 12:20:56 -07001988 Really, it is unlikely that netif_tx_lock protection is necessary
1989 here. (f.e. loopback and IP tunnels are clean ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 counters.)
 1991 However, it is possible that they rely on the protection
 1992 made by us here.
 1993
 1994 Check this and shoot the lock. It is not prone to deadlocks.
 1995 Either shoot the noqueue qdisc, it is even simpler 8)
1996 */
1997 if (dev->flags & IFF_UP) {
1998 int cpu = smp_processor_id(); /* ok because BHs are off */
1999
David S. Millerc773e842008-07-08 23:13:53 -07002000 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
David S. Millerc773e842008-07-08 23:13:53 -07002002 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002004 if (!netif_tx_queue_stopped(txq)) {
Krishna Kumar03a9a442009-08-29 20:21:36 +00002005 rc = NET_XMIT_SUCCESS;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002006 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07002007 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 goto out;
2009 }
2010 }
David S. Millerc773e842008-07-08 23:13:53 -07002011 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 if (net_ratelimit())
2013 printk(KERN_CRIT "Virtual device %s asks to "
2014 "queue packet!\n", dev->name);
2015 } else {
2016 /* Recursion is detected! It is possible,
2017 * unfortunately */
2018 if (net_ratelimit())
2019 printk(KERN_CRIT "Dead loop on virtual device "
2020 "%s, fix it urgently!\n", dev->name);
2021 }
2022 }
2023
2024 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002025 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
2027out_kfree_skb:
2028 kfree_skb(skb);
2029 return rc;
2030out:
Herbert Xud4828d82006-06-22 02:28:18 -07002031 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 return rc;
2033}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002034EXPORT_SYMBOL(dev_queue_xmit);
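/*
 * Illustrative sketch, not part of the original file: injecting a fully
 * built link-layer frame from kernel code.  Remember the skb is consumed
 * whatever the return value is.  example_xmit_frame is a hypothetical
 * name.
 */
static int example_xmit_frame(struct net_device *dev,
			      const void *frame, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), frame, len);	/* frame includes L2 header */
	skb->dev = dev;
	return dev_queue_xmit(skb);
}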
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
2036
2037/*=======================================================================
2038 Receiver routines
2039 =======================================================================*/
2040
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002041int netdev_max_backlog __read_mostly = 1000;
2042int netdev_budget __read_mostly = 300;
2043int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
2045DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2046
2047
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048/**
2049 * netif_rx - post buffer to the network code
2050 * @skb: buffer to post
2051 *
2052 * This function receives a packet from a device driver and queues it for
2053 * the upper (protocol) levels to process. It always succeeds. The buffer
2054 * may be dropped during processing for congestion control or by the
2055 * protocol layers.
2056 *
2057 * return values:
2058 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 * NET_RX_DROP (packet was dropped)
2060 *
2061 */
2062
2063int netif_rx(struct sk_buff *skb)
2064{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 struct softnet_data *queue;
2066 unsigned long flags;
2067
2068 /* if netpoll wants it, pretend we never saw it */
2069 if (netpoll_rx(skb))
2070 return NET_RX_DROP;
2071
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002072 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002073 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074
2075 /*
 2076 * The code is rearranged so that the path is shortest
 2077 * when the CPU is congested but still operating.
2078 */
2079 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 queue = &__get_cpu_var(softnet_data);
2081
2082 __get_cpu_var(netdev_rx_stat).total++;
2083 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2084 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07002088 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 }
2090
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002091 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 goto enqueue;
2093 }
2094
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 __get_cpu_var(netdev_rx_stat).dropped++;
2096 local_irq_restore(flags);
2097
2098 kfree_skb(skb);
2099 return NET_RX_DROP;
2100}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002101EXPORT_SYMBOL(netif_rx);
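/*
 * Illustrative sketch, not part of the original file: the classic
 * non-NAPI receive path of a driver interrupt handler.  All example_*
 * names are hypothetical.
 */
static void example_rx(struct net_device *dev, const void *data,
		       unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* queue for softirq delivery */
}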
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
2103int netif_rx_ni(struct sk_buff *skb)
2104{
2105 int err;
2106
2107 preempt_disable();
2108 err = netif_rx(skb);
2109 if (local_softirq_pending())
2110 do_softirq();
2111 preempt_enable();
2112
2113 return err;
2114}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115EXPORT_SYMBOL(netif_rx_ni);
2116
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117static void net_tx_action(struct softirq_action *h)
2118{
2119 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2120
2121 if (sd->completion_queue) {
2122 struct sk_buff *clist;
2123
2124 local_irq_disable();
2125 clist = sd->completion_queue;
2126 sd->completion_queue = NULL;
2127 local_irq_enable();
2128
2129 while (clist) {
2130 struct sk_buff *skb = clist;
2131 clist = clist->next;
2132
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002133 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 __kfree_skb(skb);
2135 }
2136 }
2137
2138 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002139 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140
2141 local_irq_disable();
2142 head = sd->output_queue;
2143 sd->output_queue = NULL;
2144 local_irq_enable();
2145
2146 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002147 struct Qdisc *q = head;
2148 spinlock_t *root_lock;
2149
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 head = head->next_sched;
2151
David S. Miller5fb66222008-08-02 20:02:43 -07002152 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002153 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002154 smp_mb__before_clear_bit();
2155 clear_bit(__QDISC_STATE_SCHED,
2156 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002157 qdisc_run(q);
2158 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002160 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002161 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002162 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002163 } else {
2164 smp_mb__before_clear_bit();
2165 clear_bit(__QDISC_STATE_SCHED,
2166 &q->state);
2167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 }
2169 }
2170 }
2171}
2172
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002173static inline int deliver_skb(struct sk_buff *skb,
2174 struct packet_type *pt_prev,
2175 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176{
2177 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002178 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179}
2180
2181#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002182
2183#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2184/* This hook is defined here for ATM LANE */
2185int (*br_fdb_test_addr_hook)(struct net_device *dev,
2186 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002187EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002188#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189
Stephen Hemminger6229e362007-03-21 13:38:47 -07002190/*
 2191 * If the bridge module is loaded, call the bridging hook.
 2192 * Returns NULL if the packet was consumed.
2193 */
2194struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2195 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002196EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002197
Stephen Hemminger6229e362007-03-21 13:38:47 -07002198static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2199 struct packet_type **pt_prev, int *ret,
2200 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201{
2202 struct net_bridge_port *port;
2203
Stephen Hemminger6229e362007-03-21 13:38:47 -07002204 if (skb->pkt_type == PACKET_LOOPBACK ||
2205 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2206 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207
2208 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002209 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002211 }
2212
Stephen Hemminger6229e362007-03-21 13:38:47 -07002213 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214}
2215#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002216#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217#endif
2218
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002219#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2220struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2221EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2222
2223static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2224 struct packet_type **pt_prev,
2225 int *ret,
2226 struct net_device *orig_dev)
2227{
2228 if (skb->dev->macvlan_port == NULL)
2229 return skb;
2230
2231 if (*pt_prev) {
2232 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2233 *pt_prev = NULL;
2234 }
2235 return macvlan_handle_frame_hook(skb);
2236}
2237#else
2238#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2239#endif
2240
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241#ifdef CONFIG_NET_CLS_ACT
 2242/* TODO: Maybe we should just force sch_ingress to be compiled in
 2243 * when CONFIG_NET_CLS_ACT is?  Otherwise we pay a few useless
 2244 * instructions (a compare and 2 extra stores) when it is off
 2245 * but CONFIG_NET_CLS_ACT is on.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002246 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 * the ingress scheduler, you just can't add policies on ingress.
2248 *
2249 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002250static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002253 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002254 struct netdev_queue *rxq;
2255 int result = TC_ACT_OK;
2256 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002257
Herbert Xuf697c3e2007-10-14 00:38:47 -07002258 if (MAX_RED_LOOP < ttl++) {
2259 printk(KERN_WARNING
2260 "Redir loop detected Dropping packet (%d->%d)\n",
2261 skb->iif, dev->ifindex);
2262 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 }
2264
Herbert Xuf697c3e2007-10-14 00:38:47 -07002265 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2266 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2267
David S. Miller555353c2008-07-08 17:33:13 -07002268 rxq = &dev->rx_queue;
2269
David S. Miller83874002008-07-17 00:53:03 -07002270 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002271 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002272 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002273 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2274 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002275 spin_unlock(qdisc_lock(q));
2276 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002277
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 return result;
2279}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002280
2281static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2282 struct packet_type **pt_prev,
2283 int *ret, struct net_device *orig_dev)
2284{
David S. Miller8d50b532008-07-30 02:37:46 -07002285 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002286 goto out;
2287
2288 if (*pt_prev) {
2289 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2290 *pt_prev = NULL;
2291 } else {
2292 /* Huh? Why does turning on AF_PACKET affect this? */
2293 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2294 }
2295
2296 switch (ing_filter(skb)) {
2297 case TC_ACT_SHOT:
2298 case TC_ACT_STOLEN:
2299 kfree_skb(skb);
2300 return NULL;
2301 }
2302
2303out:
2304 skb->tc_verd = 0;
2305 return skb;
2306}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307#endif
2308
Patrick McHardybc1d0412008-07-14 22:49:30 -07002309/*
2310 * netif_nit_deliver - deliver received packets to network taps
2311 * @skb: buffer
2312 *
2313 * This function is used to deliver incoming packets to network
2314 * taps. It should be used when the normal netif_receive_skb path
2315 * is bypassed, for example because of VLAN acceleration.
2316 */
2317void netif_nit_deliver(struct sk_buff *skb)
2318{
2319 struct packet_type *ptype;
2320
2321 if (list_empty(&ptype_all))
2322 return;
2323
2324 skb_reset_network_header(skb);
2325 skb_reset_transport_header(skb);
2326 skb->mac_len = skb->network_header - skb->mac_header;
2327
2328 rcu_read_lock();
2329 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2330 if (!ptype->dev || ptype->dev == skb->dev)
2331 deliver_skb(skb, ptype, skb->dev);
2332 }
2333 rcu_read_unlock();
2334}
2335
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002336/**
2337 * netif_receive_skb - process receive buffer from network
2338 * @skb: buffer to process
2339 *
2340 * netif_receive_skb() is the main receive data processing function.
2341 * It always succeeds. The buffer may be dropped during processing
2342 * for congestion control or by the protocol layers.
2343 *
2344 * This function may only be called from softirq context and interrupts
2345 * should be enabled.
2346 *
2347 * Return values (usually ignored):
2348 * NET_RX_SUCCESS: no congestion
2349 * NET_RX_DROP: packet was dropped
2350 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351int netif_receive_skb(struct sk_buff *skb)
2352{
2353 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002354 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002355 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002357 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002359 if (!skb->tstamp.tv64)
2360 net_timestamp(skb);
2361
Eric Dumazet05423b22009-10-26 18:40:35 -07002362 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002363 return NET_RX_SUCCESS;
2364
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002366 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 return NET_RX_DROP;
2368
Patrick McHardyc01003c2007-03-29 11:46:52 -07002369 if (!skb->iif)
2370 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002371
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002372 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002373 orig_dev = skb->dev;
2374 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002375 if (skb_bond_should_drop(skb))
2376 null_or_orig = orig_dev; /* deliver only exact match */
2377 else
2378 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002379 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002380
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 __get_cpu_var(netdev_rx_stat).total++;
2382
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002383 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002384 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002385 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386
2387 pt_prev = NULL;
2388
2389 rcu_read_lock();
2390
2391#ifdef CONFIG_NET_CLS_ACT
2392 if (skb->tc_verd & TC_NCLS) {
2393 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2394 goto ncls;
2395 }
2396#endif
2397
2398 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002399 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2400 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002401 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002402 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 pt_prev = ptype;
2404 }
2405 }
2406
2407#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002408 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2409 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411ncls:
2412#endif
2413
Stephen Hemminger6229e362007-03-21 13:38:47 -07002414 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2415 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002417 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2418 if (!skb)
2419 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420
2421 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002422 list_for_each_entry_rcu(ptype,
2423 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002425 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2426 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002427 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002428 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 pt_prev = ptype;
2430 }
2431 }
2432
2433 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002434 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 } else {
2436 kfree_skb(skb);
 2437 /* Jamal, now you will not be able to escape explaining
 2438 * to me how you were going to use this. :-)
2439 */
2440 ret = NET_RX_DROP;
2441 }
2442
2443out:
2444 rcu_read_unlock();
2445 return ret;
2446}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002447EXPORT_SYMBOL(netif_receive_skb);
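/*
 * Illustrative sketch, not part of the original file: a minimal NAPI poll
 * routine delivering frames via netif_receive_skb().
 * example_get_rx_skb() stands in for a hypothetical driver internal that
 * dequeues one completed rx frame, or returns NULL when the ring is empty.
 */
static struct sk_buff *example_get_rx_skb(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = example_get_rx_skb(napi)) != NULL) {
		netif_receive_skb(skb);
		work++;
	}
	if (work < budget)
		napi_complete(napi);	/* done polling, reenable interrupts */
	return work;
}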
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002449/* Network device is going away, flush any packets still pending */
2450static void flush_backlog(void *arg)
2451{
2452 struct net_device *dev = arg;
2453 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2454 struct sk_buff *skb, *tmp;
2455
2456 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2457 if (skb->dev == dev) {
2458 __skb_unlink(skb, &queue->input_pkt_queue);
2459 kfree_skb(skb);
2460 }
2461}
2462
Herbert Xud565b0a2008-12-15 23:38:52 -08002463static int napi_gro_complete(struct sk_buff *skb)
2464{
2465 struct packet_type *ptype;
2466 __be16 type = skb->protocol;
2467 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2468 int err = -ENOENT;
2469
Herbert Xufc59f9a2009-04-14 15:11:06 -07002470 if (NAPI_GRO_CB(skb)->count == 1) {
2471 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002472 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002473 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002474
2475 rcu_read_lock();
2476 list_for_each_entry_rcu(ptype, head, list) {
2477 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2478 continue;
2479
2480 err = ptype->gro_complete(skb);
2481 break;
2482 }
2483 rcu_read_unlock();
2484
2485 if (err) {
2486 WARN_ON(&ptype->list == head);
2487 kfree_skb(skb);
2488 return NET_RX_SUCCESS;
2489 }
2490
2491out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002492 return netif_receive_skb(skb);
2493}
2494
2495void napi_gro_flush(struct napi_struct *napi)
2496{
2497 struct sk_buff *skb, *next;
2498
2499 for (skb = napi->gro_list; skb; skb = next) {
2500 next = skb->next;
2501 skb->next = NULL;
2502 napi_gro_complete(skb);
2503 }
2504
Herbert Xu4ae55442009-02-08 18:00:36 +00002505 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002506 napi->gro_list = NULL;
2507}
2508EXPORT_SYMBOL(napi_gro_flush);
2509
Ben Hutchings5b252f02009-10-29 07:17:09 +00002510enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002511{
2512 struct sk_buff **pp = NULL;
2513 struct packet_type *ptype;
2514 __be16 type = skb->protocol;
2515 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002516 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002517 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002518 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002519
2520 if (!(skb->dev->features & NETIF_F_GRO))
2521 goto normal;
2522
David S. Miller4cf704f2009-06-09 00:18:51 -07002523 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002524 goto normal;
2525
Herbert Xud565b0a2008-12-15 23:38:52 -08002526 rcu_read_lock();
2527 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002528 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2529 continue;
2530
Herbert Xu86911732009-01-29 14:19:50 +00002531 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002532 mac_len = skb->network_header - skb->mac_header;
2533 skb->mac_len = mac_len;
2534 NAPI_GRO_CB(skb)->same_flow = 0;
2535 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002536 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002537
Herbert Xud565b0a2008-12-15 23:38:52 -08002538 pp = ptype->gro_receive(&napi->gro_list, skb);
2539 break;
2540 }
2541 rcu_read_unlock();
2542
2543 if (&ptype->list == head)
2544 goto normal;
2545
Herbert Xu0da2afd52008-12-26 14:57:42 -08002546 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002547 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002548
Herbert Xud565b0a2008-12-15 23:38:52 -08002549 if (pp) {
2550 struct sk_buff *nskb = *pp;
2551
2552 *pp = nskb->next;
2553 nskb->next = NULL;
2554 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002555 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002556 }
2557
Herbert Xu0da2afd52008-12-26 14:57:42 -08002558 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002559 goto ok;
2560
Herbert Xu4ae55442009-02-08 18:00:36 +00002561 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002562 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002563
Herbert Xu4ae55442009-02-08 18:00:36 +00002564 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002565 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002566 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002567 skb->next = napi->gro_list;
2568 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002569 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002570
Herbert Xuad0f9902009-02-01 01:24:55 -08002571pull:
Herbert Xucb189782009-05-26 18:50:31 +00002572 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2573 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2574
2575 BUG_ON(skb->end - skb->tail < grow);
2576
2577 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2578
2579 skb->tail += grow;
2580 skb->data_len -= grow;
2581
2582 skb_shinfo(skb)->frags[0].page_offset += grow;
2583 skb_shinfo(skb)->frags[0].size -= grow;
2584
2585 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2586 put_page(skb_shinfo(skb)->frags[0].page);
2587 memmove(skb_shinfo(skb)->frags,
2588 skb_shinfo(skb)->frags + 1,
2589 --skb_shinfo(skb)->nr_frags);
2590 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002591 }
2592
Herbert Xud565b0a2008-12-15 23:38:52 -08002593ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002594 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002595
2596normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002597 ret = GRO_NORMAL;
2598 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002599}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002600EXPORT_SYMBOL(dev_gro_receive);
2601
Ben Hutchings5b252f02009-10-29 07:17:09 +00002602static gro_result_t
2603__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002604{
2605 struct sk_buff *p;
2606
Herbert Xud1c76af2009-03-16 10:50:02 -07002607 if (netpoll_rx_on(skb))
2608 return GRO_NORMAL;
2609
Herbert Xu96e93ea2009-01-06 10:49:34 -08002610 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002611 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2612 && !compare_ether_header(skb_mac_header(p),
2613 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002614 NAPI_GRO_CB(p)->flush = 0;
2615 }
2616
2617 return dev_gro_receive(napi, skb);
2618}
Herbert Xu5d38a072009-01-04 16:13:40 -08002619
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002620gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002621{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002622 switch (ret) {
2623 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002624 if (netif_receive_skb(skb))
2625 ret = GRO_DROP;
2626 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002627
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002628 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002629 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002630 kfree_skb(skb);
2631 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002632
2633 case GRO_HELD:
2634 case GRO_MERGED:
2635 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002636 }
2637
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002638 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002639}
2640EXPORT_SYMBOL(napi_skb_finish);
2641
Herbert Xu78a478d2009-05-26 18:50:21 +00002642void skb_gro_reset_offset(struct sk_buff *skb)
2643{
2644 NAPI_GRO_CB(skb)->data_offset = 0;
2645 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002646 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002647
Herbert Xu78d3fd02009-05-26 18:50:23 +00002648 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002649 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002650 NAPI_GRO_CB(skb)->frag0 =
2651 page_address(skb_shinfo(skb)->frags[0].page) +
2652 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002653 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2654 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002655}
2656EXPORT_SYMBOL(skb_gro_reset_offset);
2657
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002658gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002659{
Herbert Xu86911732009-01-29 14:19:50 +00002660 skb_gro_reset_offset(skb);
2661
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002662 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002663}
2664EXPORT_SYMBOL(napi_gro_receive);
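/*
 * Usage sketch (illustrative only, not part of this file): a NAPI
 * driver's poll routine would hand received packets to GRO instead of
 * calling netif_receive_skb() directly. All foo_* names below are
 * hypothetical driver helpers.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv =
 *			container_of(napi, struct foo_priv, napi);
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = foo_rx_next(priv))) {
 *			skb->protocol = eth_type_trans(skb, priv->netdev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget) {
 *			napi_complete(napi);
 *			foo_enable_rx_irq(priv);
 *		}
 *		return work;
 *	}
 */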
2665
Herbert Xu96e93ea2009-01-06 10:49:34 -08002666void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2667{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002668 __skb_pull(skb, skb_headlen(skb));
2669 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2670
2671 napi->skb = skb;
2672}
2673EXPORT_SYMBOL(napi_reuse_skb);
2674
Herbert Xu76620aa2009-04-16 02:02:07 -07002675struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002676{
Herbert Xu5d38a072009-01-04 16:13:40 -08002677 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002678
2679 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00002680 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2681 if (skb)
2682 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002683 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08002684 return skb;
2685}
Herbert Xu76620aa2009-04-16 02:02:07 -07002686EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002687
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002688gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2689 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002690{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002691 switch (ret) {
2692 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002693 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002694 skb->protocol = eth_type_trans(skb, napi->dev);
2695
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002696 if (ret == GRO_HELD)
2697 skb_gro_pull(skb, -ETH_HLEN);
2698 else if (netif_receive_skb(skb))
2699 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00002700 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002701
2702 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002703 case GRO_MERGED_FREE:
2704 napi_reuse_skb(napi, skb);
2705 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002706
2707 case GRO_MERGED:
2708 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002709 }
2710
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002711 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002712}
2713EXPORT_SYMBOL(napi_frags_finish);
2714
Herbert Xu76620aa2009-04-16 02:02:07 -07002715struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002716{
Herbert Xu76620aa2009-04-16 02:02:07 -07002717 struct sk_buff *skb = napi->skb;
2718 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002719 unsigned int hlen;
2720 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002721
2722 napi->skb = NULL;
2723
2724 skb_reset_mac_header(skb);
2725 skb_gro_reset_offset(skb);
2726
Herbert Xua5b1cf22009-05-26 18:50:28 +00002727 off = skb_gro_offset(skb);
2728 hlen = off + sizeof(*eth);
2729 eth = skb_gro_header_fast(skb, off);
2730 if (skb_gro_header_hard(skb, hlen)) {
2731 eth = skb_gro_header_slow(skb, hlen, off);
2732 if (unlikely(!eth)) {
2733 napi_reuse_skb(napi, skb);
2734 skb = NULL;
2735 goto out;
2736 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002737 }
2738
2739 skb_gro_pull(skb, sizeof(*eth));
2740
2741 /*
2742 * This works because the only protocols we care about don't require
2743 * special handling. We'll fix it up properly at the end.
2744 */
2745 skb->protocol = eth->h_proto;
2746
2747out:
2748 return skb;
2749}
2750EXPORT_SYMBOL(napi_frags_skb);
2751
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002752gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07002753{
2754 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002755
2756 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002757 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002758
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002759 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002760}
2761EXPORT_SYMBOL(napi_gro_frags);
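/*
 * Usage sketch (illustrative only): drivers that receive frames
 * directly into pages can skip building a linear skb and use the
 * napi_get_frags()/napi_gro_frags() pair instead. The page is assumed
 * to start with the Ethernet header, which napi_frags_skb() parses;
 * page/offset/len are hypothetical values from the hardware descriptor.
 *
 *	skb = napi_get_frags(napi);
 *	if (unlikely(!skb))
 *		return;				// out of memory: drop
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += len;
 *	napi_gro_frags(napi);			// consumes napi->skb
 */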
2762
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002763static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764{
2765 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2767 unsigned long start_time = jiffies;
2768
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002769 napi->weight = weight_p;
2770 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772
2773 local_irq_disable();
2774 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002775 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002776 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002777 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002778 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002779 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 local_irq_enable();
2781
Herbert Xu8f1ead22009-03-26 00:59:10 -07002782 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002783 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002785 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786}
2787
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002788/**
2789 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002790 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002791 *
2792 * The entry's receive function will be scheduled to run
2793 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002794void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002795{
2796 unsigned long flags;
2797
2798 local_irq_save(flags);
2799 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2800 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2801 local_irq_restore(flags);
2802}
2803EXPORT_SYMBOL(__napi_schedule);
2804
Herbert Xud565b0a2008-12-15 23:38:52 -08002805void __napi_complete(struct napi_struct *n)
2806{
2807 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2808 BUG_ON(n->gro_list);
2809
2810 list_del(&n->poll_list);
2811 smp_mb__before_clear_bit();
2812 clear_bit(NAPI_STATE_SCHED, &n->state);
2813}
2814EXPORT_SYMBOL(__napi_complete);
2815
2816void napi_complete(struct napi_struct *n)
2817{
2818 unsigned long flags;
2819
2820 /*
2821 * don't let napi dequeue from the cpu poll list
2822	 * just in case it's running on a different cpu
2823 */
2824 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2825 return;
2826
2827 napi_gro_flush(n);
2828 local_irq_save(flags);
2829 __napi_complete(n);
2830 local_irq_restore(flags);
2831}
2832EXPORT_SYMBOL(napi_complete);
2833
2834void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2835 int (*poll)(struct napi_struct *, int), int weight)
2836{
2837 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002838 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002839 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002840 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002841 napi->poll = poll;
2842 napi->weight = weight;
2843 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002844 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002845#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002846 spin_lock_init(&napi->poll_lock);
2847 napi->poll_owner = -1;
2848#endif
2849 set_bit(NAPI_STATE_SCHED, &napi->state);
2850}
2851EXPORT_SYMBOL(netif_napi_add);
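/*
 * Usage sketch (illustrative only): the canonical NAPI life cycle as a
 * driver would wire it up; foo_* names are hypothetical. At probe time
 * the context is registered with the common default weight of 64:
 *
 *	netif_napi_add(priv->netdev, &priv->napi, foo_poll, 64);
 *
 * The interrupt handler masks further RX interrupts and defers the
 * work to softirq context:
 *
 *	static irqreturn_t foo_intr(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		foo_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 * foo_poll() then calls napi_complete() and re-enables the interrupt
 * once it polls fewer packets than its budget.
 */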
2852
2853void netif_napi_del(struct napi_struct *napi)
2854{
2855 struct sk_buff *skb, *next;
2856
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002857 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002858 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002859
2860 for (skb = napi->gro_list; skb; skb = next) {
2861 next = skb->next;
2862 skb->next = NULL;
2863 kfree_skb(skb);
2864 }
2865
2866 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002867 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002868}
2869EXPORT_SYMBOL(netif_napi_del);
2870
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002871
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872static void net_rx_action(struct softirq_action *h)
2873{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002874 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002875 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002876 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002877 void *have;
2878
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879 local_irq_disable();
2880
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002881 while (!list_empty(list)) {
2882 struct napi_struct *n;
2883 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002885		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002886		 * Allow this to run for 2 jiffies, which allows
2887 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002888 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002889 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 goto softnet_break;
2891
2892 local_irq_enable();
2893
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002894 /* Even though interrupts have been re-enabled, this
2895 * access is safe because interrupts can only add new
2896 * entries to the tail of this list, and only ->poll()
2897 * calls can remove this head entry from the list.
2898 */
2899 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002901 have = netpoll_poll_lock(n);
2902
2903 weight = n->weight;
2904
David S. Miller0a7606c2007-10-29 21:28:47 -07002905 /* This NAPI_STATE_SCHED test is for avoiding a race
2906 * with netpoll's poll_napi(). Only the entity which
2907 * obtains the lock and sees NAPI_STATE_SCHED set will
2908 * actually make the ->poll() call. Therefore we avoid
2909		 * accidentally calling ->poll() when NAPI is not scheduled.
2910 */
2911 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002912 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002913 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002914 trace_napi_poll(n);
2915 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002916
2917 WARN_ON_ONCE(work > weight);
2918
2919 budget -= work;
2920
2921 local_irq_disable();
2922
2923 /* Drivers must not modify the NAPI state if they
2924 * consume the entire weight. In such cases this code
2925 * still "owns" the NAPI instance and therefore can
2926 * move the instance around on the list at-will.
2927 */
David S. Millerfed17f32008-01-07 21:00:40 -08002928 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07002929 if (unlikely(napi_disable_pending(n))) {
2930 local_irq_enable();
2931 napi_complete(n);
2932 local_irq_disable();
2933 } else
David S. Millerfed17f32008-01-07 21:00:40 -08002934 list_move_tail(&n->poll_list, list);
2935 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002936
2937 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 }
2939out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002940 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002941
Chris Leechdb217332006-06-17 21:24:58 -07002942#ifdef CONFIG_NET_DMA
2943 /*
2944 * There may not be any more sk_buffs coming right now, so push
2945 * any pending DMA copies to hardware
2946 */
Dan Williams2ba05622009-01-06 11:38:14 -07002947 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002948#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002949
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 return;
2951
2952softnet_break:
2953 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2954 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2955 goto out;
2956}
2957
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002958static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959
2960/**
2961 * register_gifconf - register a SIOCGIFCONF handler
2962 * @family: Address family
2963 * @gifconf: Function handler
2964 *
2965 * Register protocol dependent address dumping routines. The handler
2966 * that is passed must not be freed or reused until it has been replaced
2967 * by another handler.
2968 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002969int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970{
2971 if (family >= NPROTO)
2972 return -EINVAL;
2973 gifconf_list[family] = gifconf;
2974 return 0;
2975}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002976EXPORT_SYMBOL(register_gifconf);
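/*
 * Usage sketch: an address family registers its SIOCGIFCONF dumper once
 * at init time; IPv4, for instance, does the equivalent of
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * where the handler writes one struct ifreq per configured address into
 * the user buffer (or, with a NULL buffer, just reports the bytes
 * needed) and returns the number of bytes consumed.
 */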
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977
2978
2979/*
2980 * Map an interface index to its name (SIOCGIFNAME)
2981 */
2982
2983/*
2984 * We need this ioctl for efficient implementation of the
2985 * if_indextoname() function required by the IPv6 API. Without
2986 * it, we would have to search all the interfaces to find a
2987 * match. --pb
2988 */
2989
Eric W. Biederman881d9662007-09-17 11:56:21 -07002990static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991{
2992 struct net_device *dev;
2993 struct ifreq ifr;
2994
2995 /*
2996 * Fetch the caller's info block.
2997 */
2998
2999 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3000 return -EFAULT;
3001
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003002 rcu_read_lock();
3003 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003005 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 return -ENODEV;
3007 }
3008
3009 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003010 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
3012 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3013 return -EFAULT;
3014 return 0;
3015}
3016
3017/*
3018 * Perform a SIOCGIFCONF call. This structure will change
3019 * size eventually, and there is nothing I can do about it.
3020 * Thus we will need a 'compatibility mode'.
3021 */
3022
Eric W. Biederman881d9662007-09-17 11:56:21 -07003023static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024{
3025 struct ifconf ifc;
3026 struct net_device *dev;
3027 char __user *pos;
3028 int len;
3029 int total;
3030 int i;
3031
3032 /*
3033 * Fetch the caller's info block.
3034 */
3035
3036 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3037 return -EFAULT;
3038
3039 pos = ifc.ifc_buf;
3040 len = ifc.ifc_len;
3041
3042 /*
3043 * Loop over the interfaces, and write an info block for each.
3044 */
3045
3046 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003047 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048 for (i = 0; i < NPROTO; i++) {
3049 if (gifconf_list[i]) {
3050 int done;
3051 if (!pos)
3052 done = gifconf_list[i](dev, NULL, 0);
3053 else
3054 done = gifconf_list[i](dev, pos + total,
3055 len - total);
3056 if (done < 0)
3057 return -EFAULT;
3058 total += done;
3059 }
3060 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003061 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062
3063 /*
3064 * All done. Write the updated control block back to the caller.
3065 */
3066 ifc.ifc_len = total;
3067
3068 /*
3069 * Both BSD and Solaris return 0 here, so we do too.
3070 */
3071 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3072}
3073
3074#ifdef CONFIG_PROC_FS
3075/*
3076 * This is invoked by the /proc filesystem handler to display a device
3077 * in detail.
3078 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003080 __acquires(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081{
Denis V. Luneve372c412007-11-19 22:31:54 -08003082 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003083 loff_t off;
3084 struct net_device *dev;
3085
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003086 rcu_read_lock();
Pavel Emelianov7562f872007-05-03 15:13:45 -07003087 if (!*pos)
3088 return SEQ_START_TOKEN;
3089
3090 off = 1;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003091 for_each_netdev_rcu(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003092 if (off++ == *pos)
3093 return dev;
3094
3095 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096}
3097
3098void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3099{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003100 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3101 first_net_device(seq_file_net(seq)) :
3102 next_net_device((struct net_device *)v);
3103
Linus Torvalds1da177e2005-04-16 15:20:36 -07003104 ++*pos;
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003105 return rcu_dereference(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106}
3107
3108void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003109 __releases(RCU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110{
Eric Dumazetc6d14c82009-11-04 05:43:23 -08003111 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003112}
3113
3114static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3115{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003116 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003117
Rusty Russell5a1b5892007-04-28 21:04:03 -07003118 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3119 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3120 dev->name, stats->rx_bytes, stats->rx_packets,
3121 stats->rx_errors,
3122 stats->rx_dropped + stats->rx_missed_errors,
3123 stats->rx_fifo_errors,
3124 stats->rx_length_errors + stats->rx_over_errors +
3125 stats->rx_crc_errors + stats->rx_frame_errors,
3126 stats->rx_compressed, stats->multicast,
3127 stats->tx_bytes, stats->tx_packets,
3128 stats->tx_errors, stats->tx_dropped,
3129 stats->tx_fifo_errors, stats->collisions,
3130 stats->tx_carrier_errors +
3131 stats->tx_aborted_errors +
3132 stats->tx_window_errors +
3133 stats->tx_heartbeat_errors,
3134 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135}
3136
3137/*
3138 * Called from the PROCfs module. This now uses the new arbitrary-sized
3139 * /proc/net interface to create /proc/net/dev
3140 */
3141static int dev_seq_show(struct seq_file *seq, void *v)
3142{
3143 if (v == SEQ_START_TOKEN)
3144 seq_puts(seq, "Inter-| Receive "
3145 " | Transmit\n"
3146 " face |bytes packets errs drop fifo frame "
3147 "compressed multicast|bytes packets errs "
3148 "drop fifo colls carrier compressed\n");
3149 else
3150 dev_seq_printf_stats(seq, v);
3151 return 0;
3152}
3153
3154static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3155{
3156 struct netif_rx_stats *rc = NULL;
3157
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003158 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003159 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160 rc = &per_cpu(netdev_rx_stat, *pos);
3161 break;
3162 } else
3163 ++*pos;
3164 return rc;
3165}
3166
3167static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3168{
3169 return softnet_get_online(pos);
3170}
3171
3172static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3173{
3174 ++*pos;
3175 return softnet_get_online(pos);
3176}
3177
3178static void softnet_seq_stop(struct seq_file *seq, void *v)
3179{
3180}
3181
3182static int softnet_seq_show(struct seq_file *seq, void *v)
3183{
3184 struct netif_rx_stats *s = v;
3185
3186 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003187 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003188 0, 0, 0, 0, /* was fastroute */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003189 s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190 return 0;
3191}
3192
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003193static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003194 .start = dev_seq_start,
3195 .next = dev_seq_next,
3196 .stop = dev_seq_stop,
3197 .show = dev_seq_show,
3198};
3199
3200static int dev_seq_open(struct inode *inode, struct file *file)
3201{
Denis V. Luneve372c412007-11-19 22:31:54 -08003202 return seq_open_net(inode, file, &dev_seq_ops,
3203 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003204}
3205
Arjan van de Ven9a321442007-02-12 00:55:35 -08003206static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207 .owner = THIS_MODULE,
3208 .open = dev_seq_open,
3209 .read = seq_read,
3210 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003211 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212};
3213
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003214static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003215 .start = softnet_seq_start,
3216 .next = softnet_seq_next,
3217 .stop = softnet_seq_stop,
3218 .show = softnet_seq_show,
3219};
3220
3221static int softnet_seq_open(struct inode *inode, struct file *file)
3222{
3223 return seq_open(file, &softnet_seq_ops);
3224}
3225
Arjan van de Ven9a321442007-02-12 00:55:35 -08003226static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227 .owner = THIS_MODULE,
3228 .open = softnet_seq_open,
3229 .read = seq_read,
3230 .llseek = seq_lseek,
3231 .release = seq_release,
3232};
3233
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003234static void *ptype_get_idx(loff_t pos)
3235{
3236 struct packet_type *pt = NULL;
3237 loff_t i = 0;
3238 int t;
3239
3240 list_for_each_entry_rcu(pt, &ptype_all, list) {
3241 if (i == pos)
3242 return pt;
3243 ++i;
3244 }
3245
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003246 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003247 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3248 if (i == pos)
3249 return pt;
3250 ++i;
3251 }
3252 }
3253 return NULL;
3254}
3255
3256static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003257 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003258{
3259 rcu_read_lock();
3260 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3261}
3262
3263static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3264{
3265 struct packet_type *pt;
3266 struct list_head *nxt;
3267 int hash;
3268
3269 ++*pos;
3270 if (v == SEQ_START_TOKEN)
3271 return ptype_get_idx(0);
3272
3273 pt = v;
3274 nxt = pt->list.next;
3275 if (pt->type == htons(ETH_P_ALL)) {
3276 if (nxt != &ptype_all)
3277 goto found;
3278 hash = 0;
3279 nxt = ptype_base[0].next;
3280 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003281 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003282
3283 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003284 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003285 return NULL;
3286 nxt = ptype_base[hash].next;
3287 }
3288found:
3289 return list_entry(nxt, struct packet_type, list);
3290}
3291
3292static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003293 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003294{
3295 rcu_read_unlock();
3296}
3297
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003298static int ptype_seq_show(struct seq_file *seq, void *v)
3299{
3300 struct packet_type *pt = v;
3301
3302 if (v == SEQ_START_TOKEN)
3303 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003304 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003305 if (pt->type == htons(ETH_P_ALL))
3306 seq_puts(seq, "ALL ");
3307 else
3308 seq_printf(seq, "%04x", ntohs(pt->type));
3309
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003310 seq_printf(seq, " %-8s %pF\n",
3311 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003312 }
3313
3314 return 0;
3315}
3316
3317static const struct seq_operations ptype_seq_ops = {
3318 .start = ptype_seq_start,
3319 .next = ptype_seq_next,
3320 .stop = ptype_seq_stop,
3321 .show = ptype_seq_show,
3322};
3323
3324static int ptype_seq_open(struct inode *inode, struct file *file)
3325{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003326 return seq_open_net(inode, file, &ptype_seq_ops,
3327 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003328}
3329
3330static const struct file_operations ptype_seq_fops = {
3331 .owner = THIS_MODULE,
3332 .open = ptype_seq_open,
3333 .read = seq_read,
3334 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003335 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003336};
3337
3338
Pavel Emelyanov46650792007-10-08 20:38:39 -07003339static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340{
3341 int rc = -ENOMEM;
3342
Eric W. Biederman881d9662007-09-17 11:56:21 -07003343 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003345 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003347 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003348 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003349
Eric W. Biederman881d9662007-09-17 11:56:21 -07003350 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003351 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352 rc = 0;
3353out:
3354 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003355out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003356 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003358 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003359out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003360 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361 goto out;
3362}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003363
Pavel Emelyanov46650792007-10-08 20:38:39 -07003364static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003365{
3366 wext_proc_exit(net);
3367
3368 proc_net_remove(net, "ptype");
3369 proc_net_remove(net, "softnet_stat");
3370 proc_net_remove(net, "dev");
3371}
3372
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003373static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003374 .init = dev_proc_net_init,
3375 .exit = dev_proc_net_exit,
3376};
3377
3378static int __init dev_proc_init(void)
3379{
3380 return register_pernet_subsys(&dev_proc_ops);
3381}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382#else
3383#define dev_proc_init() 0
3384#endif /* CONFIG_PROC_FS */
3385
3386
3387/**
3388 * netdev_set_master - set up master/slave pair
3389 * @slave: slave device
3390 * @master: new master device
3391 *
3392 * Changes the master device of the slave. Pass %NULL to break the
3393 * bonding. The caller must hold the RTNL semaphore. On a failure
3394 * a negative errno code is returned. On success the reference counts
3395 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3396 * function returns zero.
3397 */
3398int netdev_set_master(struct net_device *slave, struct net_device *master)
3399{
3400 struct net_device *old = slave->master;
3401
3402 ASSERT_RTNL();
3403
3404 if (master) {
3405 if (old)
3406 return -EBUSY;
3407 dev_hold(master);
3408 }
3409
3410 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003411
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412 synchronize_net();
3413
3414 if (old)
3415 dev_put(old);
3416
3417 if (master)
3418 slave->flags |= IFF_SLAVE;
3419 else
3420 slave->flags &= ~IFF_SLAVE;
3421
3422 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3423 return 0;
3424}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003425EXPORT_SYMBOL(netdev_set_master);
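/*
 * Usage sketch (illustrative only): enslaving a device and later
 * breaking the bond, in the style of the bonding driver. The RTNL
 * semaphore must already be held.
 *
 *	ASSERT_RTNL();
 *	err = netdev_set_master(slave_dev, bond_dev);	// enslave
 *	if (err)
 *		return err;
 *	...
 *	netdev_set_master(slave_dev, NULL);		// release
 */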
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003427static void dev_change_rx_flags(struct net_device *dev, int flags)
3428{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003429 const struct net_device_ops *ops = dev->netdev_ops;
3430
3431 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3432 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003433}
3434
Wang Chendad9b332008-06-18 01:48:28 -07003435static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003436{
3437 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003438 uid_t uid;
3439 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003440
Patrick McHardy24023452007-07-14 18:51:31 -07003441 ASSERT_RTNL();
3442
Wang Chendad9b332008-06-18 01:48:28 -07003443 dev->flags |= IFF_PROMISC;
3444 dev->promiscuity += inc;
3445 if (dev->promiscuity == 0) {
3446 /*
3447 * Avoid overflow.
3448 * If inc causes overflow, untouch promisc and return error.
3449 */
3450 if (inc < 0)
3451 dev->flags &= ~IFF_PROMISC;
3452 else {
3453 dev->promiscuity -= inc;
3454 printk(KERN_WARNING "%s: promiscuity touches roof, "
3455 "set promiscuity failed, promiscuity feature "
3456 "of device might be broken.\n", dev->name);
3457 return -EOVERFLOW;
3458 }
3459 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003460 if (dev->flags != old_flags) {
3461 printk(KERN_INFO "device %s %s promiscuous mode\n",
3462 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3463 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003464 if (audit_enabled) {
3465 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003466 audit_log(current->audit_context, GFP_ATOMIC,
3467 AUDIT_ANOM_PROMISCUOUS,
3468 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3469 dev->name, (dev->flags & IFF_PROMISC),
3470 (old_flags & IFF_PROMISC),
3471 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003472 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003473 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003474 }
Patrick McHardy24023452007-07-14 18:51:31 -07003475
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003476 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003477 }
Wang Chendad9b332008-06-18 01:48:28 -07003478 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003479}
3480
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481/**
3482 * dev_set_promiscuity - update promiscuity count on a device
3483 * @dev: device
3484 * @inc: modifier
3485 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003486 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487 * remains above zero the interface remains promiscuous. Once it hits zero
3488 * the device reverts back to normal filtering operation. A negative inc
3489 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003490 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491 */
Wang Chendad9b332008-06-18 01:48:28 -07003492int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003493{
3494 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003495 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003496
Wang Chendad9b332008-06-18 01:48:28 -07003497 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003498 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003499 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003500 if (dev->flags != old_flags)
3501 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003502 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003504EXPORT_SYMBOL(dev_set_promiscuity);
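/*
 * Usage sketch (illustrative only): a packet-capture style user takes
 * one promiscuity reference for the lifetime of its session and drops
 * it symmetrically; the device leaves promiscuous mode only once every
 * user has done so.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// session start
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);		// session end
 *	rtnl_unlock();
 */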
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505
3506/**
3507 * dev_set_allmulti - update allmulti count on a device
3508 * @dev: device
3509 * @inc: modifier
3510 *
3511 * Add or remove reception of all multicast frames to a device. While the
3512 * count in the device remains above zero the interface remains listening
3513 * to all multicast frames. Once it hits zero the device reverts back to normal
3514 * filtering operation. A negative @inc value is used to drop the counter
3515 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003516 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517 */
3518
Wang Chendad9b332008-06-18 01:48:28 -07003519int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003520{
3521 unsigned short old_flags = dev->flags;
3522
Patrick McHardy24023452007-07-14 18:51:31 -07003523 ASSERT_RTNL();
3524
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003526 dev->allmulti += inc;
3527 if (dev->allmulti == 0) {
3528 /*
3529 * Avoid overflow.
3530 * If inc causes overflow, untouch allmulti and return error.
3531 */
3532 if (inc < 0)
3533 dev->flags &= ~IFF_ALLMULTI;
3534 else {
3535 dev->allmulti -= inc;
3536 printk(KERN_WARNING "%s: allmulti touches roof, "
3537 "set allmulti failed, allmulti feature of "
3538 "device might be broken.\n", dev->name);
3539 return -EOVERFLOW;
3540 }
3541 }
Patrick McHardy24023452007-07-14 18:51:31 -07003542 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003543 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003544 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003545 }
Wang Chendad9b332008-06-18 01:48:28 -07003546 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003547}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003548EXPORT_SYMBOL(dev_set_allmulti);
Patrick McHardy4417da62007-06-27 01:28:10 -07003549
3550/*
3551 * Upload unicast and multicast address lists to device and
3552 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003553 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003554 * are present.
3555 */
3556void __dev_set_rx_mode(struct net_device *dev)
3557{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003558 const struct net_device_ops *ops = dev->netdev_ops;
3559
Patrick McHardy4417da62007-06-27 01:28:10 -07003560 /* dev_open will call this function so the list will stay sane. */
3561 if (!(dev->flags&IFF_UP))
3562 return;
3563
3564 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003565 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003566
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003567 if (ops->ndo_set_rx_mode)
3568 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003569 else {
3570 /* Unicast addresses changes may only happen under the rtnl,
3571 * therefore calling __dev_set_promiscuity here is safe.
3572 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003573 if (dev->uc.count > 0 && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003574 __dev_set_promiscuity(dev, 1);
3575 dev->uc_promisc = 1;
Jiri Pirko31278e72009-06-17 01:12:19 +00003576 } else if (dev->uc.count == 0 && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003577 __dev_set_promiscuity(dev, -1);
3578 dev->uc_promisc = 0;
3579 }
3580
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003581 if (ops->ndo_set_multicast_list)
3582 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003583 }
3584}
3585
3586void dev_set_rx_mode(struct net_device *dev)
3587{
David S. Millerb9e40852008-07-15 00:15:08 -07003588 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003589 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003590 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003591}
3592
Jiri Pirkof001fde2009-05-05 02:48:28 +00003593/* hw address list handling functions */
3594
Jiri Pirko31278e72009-06-17 01:12:19 +00003595static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3596 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003597{
3598 struct netdev_hw_addr *ha;
3599 int alloc_size;
3600
3601 if (addr_len > MAX_ADDR_LEN)
3602 return -EINVAL;
3603
Jiri Pirko31278e72009-06-17 01:12:19 +00003604 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003605 if (!memcmp(ha->addr, addr, addr_len) &&
3606 ha->type == addr_type) {
3607 ha->refcount++;
3608 return 0;
3609 }
3610 }
3611
Jiri Pirkof001fde2009-05-05 02:48:28 +00003613 alloc_size = sizeof(*ha);
3614 if (alloc_size < L1_CACHE_BYTES)
3615 alloc_size = L1_CACHE_BYTES;
3616 ha = kmalloc(alloc_size, GFP_ATOMIC);
3617 if (!ha)
3618 return -ENOMEM;
3619 memcpy(ha->addr, addr, addr_len);
3620 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003621 ha->refcount = 1;
3622 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003623 list_add_tail_rcu(&ha->list, &list->list);
3624 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003625 return 0;
3626}
3627
3628static void ha_rcu_free(struct rcu_head *head)
3629{
3630 struct netdev_hw_addr *ha;
3631
3632 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3633 kfree(ha);
3634}
3635
Jiri Pirko31278e72009-06-17 01:12:19 +00003636static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3637 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003638{
3639 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003640
Jiri Pirko31278e72009-06-17 01:12:19 +00003641 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003642 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003643 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003644 if (--ha->refcount)
3645 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003646 list_del_rcu(&ha->list);
3647 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003648 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003649 return 0;
3650 }
3651 }
3652 return -ENOENT;
3653}
3654
Jiri Pirko31278e72009-06-17 01:12:19 +00003655static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3656 struct netdev_hw_addr_list *from_list,
3657 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003658 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003659{
3660 int err;
3661 struct netdev_hw_addr *ha, *ha2;
3662 unsigned char type;
3663
Jiri Pirko31278e72009-06-17 01:12:19 +00003664 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003665 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003666 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003667 if (err)
3668 goto unroll;
3669 }
3670 return 0;
3671
3672unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00003673 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003674 if (ha2 == ha)
3675 break;
3676 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003677 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003678 }
3679 return err;
3680}
3681
Jiri Pirko31278e72009-06-17 01:12:19 +00003682static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3683 struct netdev_hw_addr_list *from_list,
3684 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003685 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003686{
3687 struct netdev_hw_addr *ha;
3688 unsigned char type;
3689
Jiri Pirko31278e72009-06-17 01:12:19 +00003690 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003691 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003692		__hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003693 }
3694}
3695
Jiri Pirko31278e72009-06-17 01:12:19 +00003696static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3697 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003698 int addr_len)
3699{
3700 int err = 0;
3701 struct netdev_hw_addr *ha, *tmp;
3702
Jiri Pirko31278e72009-06-17 01:12:19 +00003703 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003704 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003705 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003706 addr_len, ha->type);
3707 if (err)
3708 break;
3709 ha->synced = true;
3710 ha->refcount++;
3711 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003712 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3713 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003714 }
3715 }
3716 return err;
3717}
3718
Jiri Pirko31278e72009-06-17 01:12:19 +00003719static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3720 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003721 int addr_len)
3722{
3723 struct netdev_hw_addr *ha, *tmp;
3724
Jiri Pirko31278e72009-06-17 01:12:19 +00003725 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003726 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003727 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003728 addr_len, ha->type);
3729 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003730 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003731 addr_len, ha->type);
3732 }
3733 }
3734}
3735
Jiri Pirko31278e72009-06-17 01:12:19 +00003736static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003737{
3738 struct netdev_hw_addr *ha, *tmp;
3739
Jiri Pirko31278e72009-06-17 01:12:19 +00003740 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003741 list_del_rcu(&ha->list);
3742 call_rcu(&ha->rcu_head, ha_rcu_free);
3743 }
Jiri Pirko31278e72009-06-17 01:12:19 +00003744 list->count = 0;
3745}
3746
3747static void __hw_addr_init(struct netdev_hw_addr_list *list)
3748{
3749 INIT_LIST_HEAD(&list->list);
3750 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003751}
3752
3753/* Device addresses handling functions */
3754
3755static void dev_addr_flush(struct net_device *dev)
3756{
3757 /* rtnl_mutex must be held here */
3758
Jiri Pirko31278e72009-06-17 01:12:19 +00003759 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003760 dev->dev_addr = NULL;
3761}
3762
3763static int dev_addr_init(struct net_device *dev)
3764{
3765 unsigned char addr[MAX_ADDR_LEN];
3766 struct netdev_hw_addr *ha;
3767 int err;
3768
3769 /* rtnl_mutex must be held here */
3770
Jiri Pirko31278e72009-06-17 01:12:19 +00003771 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00003772 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00003773 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003774 NETDEV_HW_ADDR_T_LAN);
3775 if (!err) {
3776 /*
3777 * Get the first (previously created) address from the list
3778 * and set dev_addr pointer to this location.
3779 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003780 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003781 struct netdev_hw_addr, list);
3782 dev->dev_addr = ha->addr;
3783 }
3784 return err;
3785}
3786
3787/**
3788 * dev_addr_add - Add a device address
3789 * @dev: device
3790 * @addr: address to add
3791 * @addr_type: address type
3792 *
3793 * Add a device address to the device or increase the reference count if
3794 * it already exists.
3795 *
3796 * The caller must hold the rtnl_mutex.
3797 */
3798int dev_addr_add(struct net_device *dev, unsigned char *addr,
3799 unsigned char addr_type)
3800{
3801 int err;
3802
3803 ASSERT_RTNL();
3804
Jiri Pirko31278e72009-06-17 01:12:19 +00003805 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003806 if (!err)
3807 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3808 return err;
3809}
3810EXPORT_SYMBOL(dev_addr_add);
3811
3812/**
3813 * dev_addr_del - Release a device address.
3814 * @dev: device
3815 * @addr: address to delete
3816 * @addr_type: address type
3817 *
3818 * Release reference to a device address and remove it from the device
3819 * if the reference count drops to zero.
3820 *
3821 * The caller must hold the rtnl_mutex.
3822 */
3823int dev_addr_del(struct net_device *dev, unsigned char *addr,
3824 unsigned char addr_type)
3825{
3826 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003827 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003828
3829 ASSERT_RTNL();
3830
Jiri Pirkoccffad252009-05-22 23:22:17 +00003831 /*
3832	 * We cannot remove the first address from the list because
3833 * dev->dev_addr points to that.
3834 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003835 ha = list_first_entry(&dev->dev_addrs.list,
3836 struct netdev_hw_addr, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003837 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3838 return -ENOENT;
3839
Jiri Pirko31278e72009-06-17 01:12:19 +00003840 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003841 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003842 if (!err)
3843 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3844 return err;
3845}
3846EXPORT_SYMBOL(dev_addr_del);
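/*
 * Usage sketch (illustrative only): adding and then releasing a
 * secondary hardware address under the rtnl_mutex. The address below is
 * a made-up locally administered one (0x02 in the first octet).
 *
 *	static unsigned char example_addr[ETH_ALEN] =
 *		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, example_addr, NETDEV_HW_ADDR_T_LAN);
 *	...
 *	if (!err)
 *		dev_addr_del(dev, example_addr, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 */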
3847
3848/**
3849 * dev_addr_add_multiple - Add device addresses from another device
3850 * @to_dev: device to which addresses will be added
3851 * @from_dev: device from which addresses will be added
3852 * @addr_type: address type - 0 means type will be used from from_dev
3853 *
3854 * Add the device addresses of one device to another.
3855 *
3856 * The caller must hold the rtnl_mutex.
3857 */
3858int dev_addr_add_multiple(struct net_device *to_dev,
3859 struct net_device *from_dev,
3860 unsigned char addr_type)
3861{
3862 int err;
3863
3864 ASSERT_RTNL();
3865
3866 if (from_dev->addr_len != to_dev->addr_len)
3867 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003868 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003869 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003870 if (!err)
3871 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3872 return err;
3873}
3874EXPORT_SYMBOL(dev_addr_add_multiple);
3875
3876/**
3877 * dev_addr_del_multiple - Delete device addresses by another device
3878 * @to_dev: device from which the addresses will be deleted
3879 * @from_dev: device whose addresses are to be deleted
3880 * @addr_type: address type - 0 means type will be used from from_dev
3881 *
3882 * Deletes from @to_dev the addresses listed in @from_dev.
3883 *
3884 * The caller must hold the rtnl_mutex.
3885 */
3886int dev_addr_del_multiple(struct net_device *to_dev,
3887 struct net_device *from_dev,
3888 unsigned char addr_type)
3889{
3890 ASSERT_RTNL();
3891
3892 if (from_dev->addr_len != to_dev->addr_len)
3893 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003894 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003895 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003896 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3897 return 0;
3898}
3899EXPORT_SYMBOL(dev_addr_del_multiple);
3900
Jiri Pirko31278e72009-06-17 01:12:19 +00003901/* multicast address handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00003902
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003903int __dev_addr_delete(struct dev_addr_list **list, int *count,
3904 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003905{
3906 struct dev_addr_list *da;
3907
3908 for (; (da = *list) != NULL; list = &da->next) {
3909 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3910 alen == da->da_addrlen) {
3911 if (glbl) {
3912 int old_glbl = da->da_gusers;
3913 da->da_gusers = 0;
3914 if (old_glbl == 0)
3915 break;
3916 }
3917 if (--da->da_users)
3918 return 0;
3919
3920 *list = da->next;
3921 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003922 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003923 return 0;
3924 }
3925 }
3926 return -ENOENT;
3927}
3928
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003929int __dev_addr_add(struct dev_addr_list **list, int *count,
3930 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003931{
3932 struct dev_addr_list *da;
3933
3934 for (da = *list; da != NULL; da = da->next) {
3935 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3936 da->da_addrlen == alen) {
3937 if (glbl) {
3938 int old_glbl = da->da_gusers;
3939 da->da_gusers = 1;
3940 if (old_glbl)
3941 return 0;
3942 }
3943 da->da_users++;
3944 return 0;
3945 }
3946 }
3947
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003948 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003949 if (da == NULL)
3950 return -ENOMEM;
3951 memcpy(da->da_addr, addr, alen);
3952 da->da_addrlen = alen;
3953 da->da_users = 1;
3954 da->da_gusers = glbl ? 1 : 0;
3955 da->next = *list;
3956 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003957 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003958 return 0;
3959}
3960
Patrick McHardy4417da62007-06-27 01:28:10 -07003961/**
3962 * dev_unicast_delete - Release secondary unicast address.
3963 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003964 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07003965 *
3966 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003967 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003968 *
3969 * The caller must hold the rtnl_mutex.
3970 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003971int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003972{
3973 int err;
3974
3975 ASSERT_RTNL();
3976
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003977 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00003978 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3979 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003980 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003981 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003982 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003983 return err;
3984}
3985EXPORT_SYMBOL(dev_unicast_delete);
3986
3987/**
3988 * dev_unicast_add - add a secondary unicast address
3989 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003990 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07003991 *
3992 * Add a secondary unicast address to the device or increase
3993 * the reference count if it already exists.
3994 *
3995 * The caller must hold the rtnl_mutex.
3996 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003997int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003998{
3999 int err;
4000
4001 ASSERT_RTNL();
4002
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004003 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004004 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4005 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004006 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004007 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004008 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004009 return err;
4010}
4011EXPORT_SYMBOL(dev_unicast_add);
4012
Chris Leeche83a2ea2008-01-31 16:53:23 -08004013int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4014 struct dev_addr_list **from, int *from_count)
4015{
4016 struct dev_addr_list *da, *next;
4017 int err = 0;
4018
4019 da = *from;
4020 while (da != NULL) {
4021 next = da->next;
4022 if (!da->da_synced) {
4023 err = __dev_addr_add(to, to_count,
4024 da->da_addr, da->da_addrlen, 0);
4025 if (err < 0)
4026 break;
4027 da->da_synced = 1;
4028 da->da_users++;
4029 } else if (da->da_users == 1) {
4030 __dev_addr_delete(to, to_count,
4031 da->da_addr, da->da_addrlen, 0);
4032 __dev_addr_delete(from, from_count,
4033 da->da_addr, da->da_addrlen, 0);
4034 }
4035 da = next;
4036 }
4037 return err;
4038}
Johannes Bergc4029082009-06-17 17:43:30 +02004039EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004040
4041void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4042 struct dev_addr_list **from, int *from_count)
4043{
4044 struct dev_addr_list *da, *next;
4045
4046 da = *from;
4047 while (da != NULL) {
4048 next = da->next;
4049 if (da->da_synced) {
4050 __dev_addr_delete(to, to_count,
4051 da->da_addr, da->da_addrlen, 0);
4052 da->da_synced = 0;
4053 __dev_addr_delete(from, from_count,
4054 da->da_addr, da->da_addrlen, 0);
4055 }
4056 da = next;
4057 }
4058}
Johannes Bergc4029082009-06-17 17:43:30 +02004059EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004060
4061/**
4062 * dev_unicast_sync - Synchronize device's unicast list to another device
4063 * @to: destination device
4064 * @from: source device
4065 *
4066 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004067 * addresses that have no users left. The source device must be
4068 * locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08004069 *
4070 * This function is intended to be called from the dev->set_rx_mode
4071 * function of layered software devices.
4072 */
4073int dev_unicast_sync(struct net_device *to, struct net_device *from)
4074{
4075 int err = 0;
4076
Jiri Pirkoccffad252009-05-22 23:22:17 +00004077 if (to->addr_len != from->addr_len)
4078 return -EINVAL;
4079
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004080 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004081 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004082 if (!err)
4083 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004084 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004085 return err;
4086}
4087EXPORT_SYMBOL(dev_unicast_sync);
4088
4089/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004090 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004091 * @to: destination device
4092 * @from: source device
4093 *
4094 * Remove all addresses that were added to the destination device by
4095 * dev_unicast_sync(). This function is intended to be called from the
4096 * dev->stop function of layered software devices.
4097 */
4098void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4099{
Jiri Pirkoccffad252009-05-22 23:22:17 +00004100 if (to->addr_len != from->addr_len)
4101 return;
4102
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004103 netif_addr_lock_bh(from);
4104 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004105 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004106 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004107 netif_addr_unlock(to);
4108 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004109}
4110EXPORT_SYMBOL(dev_unicast_unsync);
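/*
 * Illustrative sketch, not part of the original file: the intended
 * sync/unsync pairing in a layered software device, per the kernel-doc
 * above.  struct foo_priv, its lowerdev member and the foo_* hooks are
 * assumptions for illustration.
 */
struct foo_priv {
	struct net_device *lowerdev;
};

static void foo_set_rx_mode(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Push our unicast filter down to the real device. */
	dev_unicast_sync(priv->lowerdev, dev);
}

static int foo_stop(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Remove the addresses foo_set_rx_mode() pushed down. */
	dev_unicast_unsync(priv->lowerdev, dev);
	return 0;
}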
4111
Jiri Pirkoccffad252009-05-22 23:22:17 +00004112static void dev_unicast_flush(struct net_device *dev)
4113{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004114 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004115 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004116 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004117}
4118
4119static void dev_unicast_init(struct net_device *dev)
4120{
Jiri Pirko31278e72009-06-17 01:12:19 +00004121 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004122}
4123
4124
Denis Cheng12972622007-07-18 02:12:56 -07004125static void __dev_addr_discard(struct dev_addr_list **list)
4126{
4127 struct dev_addr_list *tmp;
4128
4129 while (*list != NULL) {
4130 tmp = *list;
4131 *list = tmp->next;
4132 if (tmp->da_users > tmp->da_gusers)
 4133 printk(KERN_ERR "__dev_addr_discard: address leakage! "
4134 "da_users=%d\n", tmp->da_users);
4135 kfree(tmp);
4136 }
4137}
4138
Denis Cheng26cc2522007-07-18 02:12:03 -07004139static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004140{
David S. Millerb9e40852008-07-15 00:15:08 -07004141 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004142
Denis Cheng456ad752007-07-18 02:10:54 -07004143 __dev_addr_discard(&dev->mc_list);
4144 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004145
David S. Millerb9e40852008-07-15 00:15:08 -07004146 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004147}
4148
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004149/**
4150 * dev_get_flags - get flags reported to userspace
4151 * @dev: device
4152 *
4153 * Get the combination of flag bits exported through APIs to userspace.
4154 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155unsigned dev_get_flags(const struct net_device *dev)
4156{
4157 unsigned flags;
4158
4159 flags = (dev->flags & ~(IFF_PROMISC |
4160 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004161 IFF_RUNNING |
4162 IFF_LOWER_UP |
4163 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164 (dev->gflags & (IFF_PROMISC |
4165 IFF_ALLMULTI));
4166
Stefan Rompfb00055a2006-03-20 17:09:11 -08004167 if (netif_running(dev)) {
4168 if (netif_oper_up(dev))
4169 flags |= IFF_RUNNING;
4170 if (netif_carrier_ok(dev))
4171 flags |= IFF_LOWER_UP;
4172 if (netif_dormant(dev))
4173 flags |= IFF_DORMANT;
4174 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004175
4176 return flags;
4177}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004178EXPORT_SYMBOL(dev_get_flags);
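/*
 * Illustrative sketch, not part of the original file: dev_get_flags()
 * folds the operstate bits (IFF_RUNNING and friends) into the flags,
 * so a netdevice notifier can test them in one read.  foo_netdev_event
 * is an assumed handler name.
 */
static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev_get_flags(dev) & IFF_RUNNING)
		printk(KERN_DEBUG "%s: operationally up\n", dev->name);
	return NOTIFY_DONE;
}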
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004180/**
4181 * dev_change_flags - change device settings
4182 * @dev: device
4183 * @flags: device state flags
4184 *
 4186 4185 * Change settings on device based on state flags. The flags are
4186 * in the userspace exported format.
4187 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188int dev_change_flags(struct net_device *dev, unsigned flags)
4189{
Thomas Graf7c355f52007-06-05 16:03:03 -07004190 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004191 int old_flags = dev->flags;
4192
Patrick McHardy24023452007-07-14 18:51:31 -07004193 ASSERT_RTNL();
4194
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195 /*
4196 * Set the flags on our device.
4197 */
4198
4199 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4200 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4201 IFF_AUTOMEDIA)) |
4202 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4203 IFF_ALLMULTI));
4204
4205 /*
4206 * Load in the correct multicast list now the flags have changed.
4207 */
4208
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004209 if ((old_flags ^ flags) & IFF_MULTICAST)
4210 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004211
Patrick McHardy4417da62007-06-27 01:28:10 -07004212 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213
4214 /*
 4215 * Have we downed the interface? We handle IFF_UP ourselves
4216 * according to user attempts to set it, rather than blindly
4217 * setting it.
4218 */
4219
4220 ret = 0;
4221 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4222 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4223
4224 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004225 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226 }
4227
4228 if (dev->flags & IFF_UP &&
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004229 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004231 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232
4233 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004234 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4235
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236 dev->gflags ^= IFF_PROMISC;
4237 dev_set_promiscuity(dev, inc);
4238 }
4239
4240 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 4241 is important. Some (broken) drivers set IFF_PROMISC when
 4242 IFF_ALLMULTI is requested, without asking us and without reporting.
4243 */
4244 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004245 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4246
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 dev->gflags ^= IFF_ALLMULTI;
4248 dev_set_allmulti(dev, inc);
4249 }
4250
Thomas Graf7c355f52007-06-05 16:03:03 -07004251 /* Exclude state transition flags, already notified */
4252 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4253 if (changes)
4254 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255
4256 return ret;
4257}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004258EXPORT_SYMBOL(dev_change_flags);
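/*
 * Illustrative sketch, not part of the original file: the in-kernel
 * equivalent of "ifconfig ethX up" via SIOCSIFFLAGS -- read the
 * userspace-format flags, set IFF_UP, write them back.  foo_bring_up
 * is an assumed helper; RTNL is required, as asserted above.
 */
static int foo_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}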
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004260/**
4261 * dev_set_mtu - Change maximum transfer unit
4262 * @dev: device
4263 * @new_mtu: new transfer unit
4264 *
4265 * Change the maximum transfer size of the network device.
4266 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004267int dev_set_mtu(struct net_device *dev, int new_mtu)
4268{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004269 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270 int err;
4271
4272 if (new_mtu == dev->mtu)
4273 return 0;
4274
4275 /* MTU must be positive. */
4276 if (new_mtu < 0)
4277 return -EINVAL;
4278
4279 if (!netif_device_present(dev))
4280 return -ENODEV;
4281
4282 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004283 if (ops->ndo_change_mtu)
4284 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285 else
4286 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004287
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004289 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290 return err;
4291}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004292EXPORT_SYMBOL(dev_set_mtu);
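/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * encapsulating driver clamping a lower device's MTU to leave room
 * for its header.  foo_clamp_mtu and max_mtu are assumptions; RTNL is
 * held so the NETDEV_CHANGEMTU notifier runs in the usual context.
 */
static int foo_clamp_mtu(struct net_device *dev, int max_mtu)
{
	int err = 0;

	rtnl_lock();
	if (dev->mtu > max_mtu)
		err = dev_set_mtu(dev, max_mtu);
	rtnl_unlock();
	return err;
}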
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004294/**
4295 * dev_set_mac_address - Change Media Access Control Address
4296 * @dev: device
4297 * @sa: new address
4298 *
4299 * Change the hardware (MAC) address of the device
4300 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4302{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004303 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304 int err;
4305
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004306 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307 return -EOPNOTSUPP;
4308 if (sa->sa_family != dev->type)
4309 return -EINVAL;
4310 if (!netif_device_present(dev))
4311 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004312 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004314 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315 return err;
4316}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004317EXPORT_SYMBOL(dev_set_mac_address);
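/*
 * Illustrative sketch, not part of the original file: building the
 * struct sockaddr this function expects.  Note sa_family must equal
 * dev->type (checked above) and sa_data must hold dev->addr_len
 * bytes.  foo_set_mac is an assumed helper.
 */
static int foo_set_mac(struct net_device *dev, const unsigned char *mac)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, dev->addr_len);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}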
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318
4319/*
Eric Dumazet3710bec2009-11-01 19:42:09 +00004320 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004322static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323{
4324 int err;
Eric Dumazet3710bec2009-11-01 19:42:09 +00004325 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326
4327 if (!dev)
4328 return -ENODEV;
4329
4330 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004331 case SIOCGIFFLAGS: /* Get interface flags */
4332 ifr->ifr_flags = (short) dev_get_flags(dev);
4333 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004335 case SIOCGIFMETRIC: /* Get the metric on the interface
4336 (currently unused) */
4337 ifr->ifr_metric = 0;
4338 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004340 case SIOCGIFMTU: /* Get the MTU of a device */
4341 ifr->ifr_mtu = dev->mtu;
4342 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004344 case SIOCGIFHWADDR:
4345 if (!dev->addr_len)
4346 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4347 else
4348 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4349 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4350 ifr->ifr_hwaddr.sa_family = dev->type;
4351 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004353 case SIOCGIFSLAVE:
4354 err = -EINVAL;
4355 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004356
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004357 case SIOCGIFMAP:
4358 ifr->ifr_map.mem_start = dev->mem_start;
4359 ifr->ifr_map.mem_end = dev->mem_end;
4360 ifr->ifr_map.base_addr = dev->base_addr;
4361 ifr->ifr_map.irq = dev->irq;
4362 ifr->ifr_map.dma = dev->dma;
4363 ifr->ifr_map.port = dev->if_port;
4364 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004365
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004366 case SIOCGIFINDEX:
4367 ifr->ifr_ifindex = dev->ifindex;
4368 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004369
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004370 case SIOCGIFTXQLEN:
4371 ifr->ifr_qlen = dev->tx_queue_len;
4372 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004373
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004374 default:
4375 /* dev_ioctl() should ensure this case
4376 * is never reached
4377 */
4378 WARN_ON(1);
4379 err = -EINVAL;
4380 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004381
4382 }
4383 return err;
4384}
4385
4386/*
4387 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4388 */
4389static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4390{
4391 int err;
4392 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004393 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004394
4395 if (!dev)
4396 return -ENODEV;
4397
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004398 ops = dev->netdev_ops;
4399
Jeff Garzik14e3e072007-10-08 00:06:32 -07004400 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004401 case SIOCSIFFLAGS: /* Set interface flags */
4402 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004403
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004404 case SIOCSIFMETRIC: /* Set the metric on the interface
4405 (currently unused) */
4406 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004407
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004408 case SIOCSIFMTU: /* Set the MTU of a device */
4409 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004410
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004411 case SIOCSIFHWADDR:
4412 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004413
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004414 case SIOCSIFHWBROADCAST:
4415 if (ifr->ifr_hwaddr.sa_family != dev->type)
4416 return -EINVAL;
4417 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4418 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4419 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4420 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004422 case SIOCSIFMAP:
4423 if (ops->ndo_set_config) {
4424 if (!netif_device_present(dev))
4425 return -ENODEV;
4426 return ops->ndo_set_config(dev, &ifr->ifr_map);
4427 }
4428 return -EOPNOTSUPP;
4429
4430 case SIOCADDMULTI:
4431 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4432 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4433 return -EINVAL;
4434 if (!netif_device_present(dev))
4435 return -ENODEV;
4436 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4437 dev->addr_len, 1);
4438
4439 case SIOCDELMULTI:
4440 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4441 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4442 return -EINVAL;
4443 if (!netif_device_present(dev))
4444 return -ENODEV;
4445 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4446 dev->addr_len, 1);
4447
4448 case SIOCSIFTXQLEN:
4449 if (ifr->ifr_qlen < 0)
4450 return -EINVAL;
4451 dev->tx_queue_len = ifr->ifr_qlen;
4452 return 0;
4453
4454 case SIOCSIFNAME:
4455 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4456 return dev_change_name(dev, ifr->ifr_newname);
4457
4458 /*
4459 * Unknown or private ioctl
4460 */
4461 default:
4462 if ((cmd >= SIOCDEVPRIVATE &&
4463 cmd <= SIOCDEVPRIVATE + 15) ||
4464 cmd == SIOCBONDENSLAVE ||
4465 cmd == SIOCBONDRELEASE ||
4466 cmd == SIOCBONDSETHWADDR ||
4467 cmd == SIOCBONDSLAVEINFOQUERY ||
4468 cmd == SIOCBONDINFOQUERY ||
4469 cmd == SIOCBONDCHANGEACTIVE ||
4470 cmd == SIOCGMIIPHY ||
4471 cmd == SIOCGMIIREG ||
4472 cmd == SIOCSMIIREG ||
4473 cmd == SIOCBRADDIF ||
4474 cmd == SIOCBRDELIF ||
4475 cmd == SIOCSHWTSTAMP ||
4476 cmd == SIOCWANDEV) {
4477 err = -EOPNOTSUPP;
4478 if (ops->ndo_do_ioctl) {
4479 if (netif_device_present(dev))
4480 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4481 else
4482 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004484 } else
4485 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486
4487 }
4488 return err;
4489}
4490
4491/*
4492 * This function handles all "interface"-type I/O control requests. The actual
4493 * 'doing' part of this is dev_ifsioc above.
4494 */
4495
4496/**
4497 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004498 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499 * @cmd: command to issue
4500 * @arg: pointer to a struct ifreq in user space
4501 *
4502 * Issue ioctl functions to devices. This is normally called by the
4503 * user space syscall interfaces but can sometimes be useful for
4504 * other purposes. The return value is the return from the syscall if
4505 * positive or a negative errno code on error.
4506 */
4507
Eric W. Biederman881d9662007-09-17 11:56:21 -07004508int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509{
4510 struct ifreq ifr;
4511 int ret;
4512 char *colon;
4513
 4514 /* One special case: SIOCGIFCONF takes an ifconf argument
 4515 and requires a shared lock, because it sleeps writing
4516 to user space.
4517 */
4518
4519 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004520 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004521 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004522 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523 return ret;
4524 }
4525 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004526 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004527
4528 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4529 return -EFAULT;
4530
4531 ifr.ifr_name[IFNAMSIZ-1] = 0;
4532
4533 colon = strchr(ifr.ifr_name, ':');
4534 if (colon)
4535 *colon = 0;
4536
4537 /*
4538 * See which interface the caller is talking about.
4539 */
4540
4541 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004542 /*
4543 * These ioctl calls:
4544 * - can be done by all.
4545 * - atomic and do not require locking.
4546 * - return a value
4547 */
4548 case SIOCGIFFLAGS:
4549 case SIOCGIFMETRIC:
4550 case SIOCGIFMTU:
4551 case SIOCGIFHWADDR:
4552 case SIOCGIFSLAVE:
4553 case SIOCGIFMAP:
4554 case SIOCGIFINDEX:
4555 case SIOCGIFTXQLEN:
4556 dev_load(net, ifr.ifr_name);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004557 rcu_read_lock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004558 ret = dev_ifsioc_locked(net, &ifr, cmd);
Eric Dumazet3710bec2009-11-01 19:42:09 +00004559 rcu_read_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004560 if (!ret) {
4561 if (colon)
4562 *colon = ':';
4563 if (copy_to_user(arg, &ifr,
4564 sizeof(struct ifreq)))
4565 ret = -EFAULT;
4566 }
4567 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004568
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004569 case SIOCETHTOOL:
4570 dev_load(net, ifr.ifr_name);
4571 rtnl_lock();
4572 ret = dev_ethtool(net, &ifr);
4573 rtnl_unlock();
4574 if (!ret) {
4575 if (colon)
4576 *colon = ':';
4577 if (copy_to_user(arg, &ifr,
4578 sizeof(struct ifreq)))
4579 ret = -EFAULT;
4580 }
4581 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004583 /*
4584 * These ioctl calls:
4585 * - require superuser power.
4586 * - require strict serialization.
4587 * - return a value
4588 */
4589 case SIOCGMIIPHY:
4590 case SIOCGMIIREG:
4591 case SIOCSIFNAME:
4592 if (!capable(CAP_NET_ADMIN))
4593 return -EPERM;
4594 dev_load(net, ifr.ifr_name);
4595 rtnl_lock();
4596 ret = dev_ifsioc(net, &ifr, cmd);
4597 rtnl_unlock();
4598 if (!ret) {
4599 if (colon)
4600 *colon = ':';
4601 if (copy_to_user(arg, &ifr,
4602 sizeof(struct ifreq)))
4603 ret = -EFAULT;
4604 }
4605 return ret;
4606
4607 /*
4608 * These ioctl calls:
4609 * - require superuser power.
4610 * - require strict serialization.
4611 * - do not return a value
4612 */
4613 case SIOCSIFFLAGS:
4614 case SIOCSIFMETRIC:
4615 case SIOCSIFMTU:
4616 case SIOCSIFMAP:
4617 case SIOCSIFHWADDR:
4618 case SIOCSIFSLAVE:
4619 case SIOCADDMULTI:
4620 case SIOCDELMULTI:
4621 case SIOCSIFHWBROADCAST:
4622 case SIOCSIFTXQLEN:
4623 case SIOCSMIIREG:
4624 case SIOCBONDENSLAVE:
4625 case SIOCBONDRELEASE:
4626 case SIOCBONDSETHWADDR:
4627 case SIOCBONDCHANGEACTIVE:
4628 case SIOCBRADDIF:
4629 case SIOCBRDELIF:
4630 case SIOCSHWTSTAMP:
4631 if (!capable(CAP_NET_ADMIN))
4632 return -EPERM;
4633 /* fall through */
4634 case SIOCBONDSLAVEINFOQUERY:
4635 case SIOCBONDINFOQUERY:
4636 dev_load(net, ifr.ifr_name);
4637 rtnl_lock();
4638 ret = dev_ifsioc(net, &ifr, cmd);
4639 rtnl_unlock();
4640 return ret;
4641
4642 case SIOCGIFMEM:
4643 /* Get the per device memory space. We can add this but
4644 * currently do not support it */
4645 case SIOCSIFMEM:
4646 /* Set the per device memory buffer space.
4647 * Not applicable in our case */
4648 case SIOCSIFLINK:
4649 return -EINVAL;
4650
4651 /*
4652 * Unknown or private ioctl.
4653 */
4654 default:
4655 if (cmd == SIOCWANDEV ||
4656 (cmd >= SIOCDEVPRIVATE &&
4657 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004658 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004659 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004660 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004662 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004664 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004665 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004666 }
4667 /* Take care of Wireless Extensions */
4668 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4669 return wext_handle_ioctl(net, &ifr, cmd, arg);
4670 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671 }
4672}
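/*
 * Illustrative sketch, not part of the original file: the userspace
 * side of one of the lockless SIOCxIFxxx paths above (SIOCGIFMTU),
 * shown as a standalone program.  The interface name "eth0" is an
 * assumption.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)	/* serviced by dev_ifsioc_locked() */
		printf("%s: mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
	close(fd);
	return 0;
}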
4673
4674
4675/**
4676 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004677 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004678 *
4679 * Returns a suitable unique value for a new device interface
4680 * number. The caller must hold the rtnl semaphore or the
4681 * dev_base_lock to be sure it remains unique.
4682 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004683static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004684{
4685 static int ifindex;
4686 for (;;) {
4687 if (++ifindex <= 0)
4688 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004689 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690 return ifindex;
4691 }
4692}
4693
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004695static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004697static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004698{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004700}
4701
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004702static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004703{
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004704 struct net_device *dev;
4705
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004706 BUG_ON(dev_boot_phase);
4707 ASSERT_RTNL();
4708
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004709 list_for_each_entry(dev, head, unreg_list) {
 4710 /* Some devices call this without ever having registered,
 4711 * as part of initialization unwind.
4712 */
4713 if (dev->reg_state == NETREG_UNINITIALIZED) {
4714 pr_debug("unregister_netdevice: device %s/%p never "
4715 "was registered\n", dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004716
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004717 WARN_ON(1);
4718 return;
4719 }
4720
4721 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4722
4723 /* If device is running, close it first. */
4724 dev_close(dev);
4725
4726 /* And unlink it from device chain. */
4727 unlist_netdevice(dev);
4728
4729 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004730 }
4731
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004732 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004733
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004734 list_for_each_entry(dev, head, unreg_list) {
4735 /* Shutdown queueing discipline. */
4736 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004737
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004738
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004739 /* Notify protocols that we are about to destroy
4740 this device. They should clean all the things.
4741 */
4742 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4743
4744 /*
4745 * Flush the unicast and multicast chains
4746 */
4747 dev_unicast_flush(dev);
4748 dev_addr_discard(dev);
4749
4750 if (dev->netdev_ops->ndo_uninit)
4751 dev->netdev_ops->ndo_uninit(dev);
4752
4753 /* Notifier chain MUST detach us from master device. */
4754 WARN_ON(dev->master);
4755
4756 /* Remove entries from kobject tree */
4757 netdev_unregister_kobject(dev);
4758 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004759
4760 synchronize_net();
4761
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004762 list_for_each_entry(dev, head, unreg_list)
4763 dev_put(dev);
4764}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004765
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004766static void rollback_registered(struct net_device *dev)
4767{
4768 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004769
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004770 list_add(&dev->unreg_list, &single);
4771 rollback_registered_many(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004772}
4773
David S. Millere8a04642008-07-17 00:34:19 -07004774static void __netdev_init_queue_locks_one(struct net_device *dev,
4775 struct netdev_queue *dev_queue,
4776 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004777{
4778 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004779 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004780 dev_queue->xmit_lock_owner = -1;
4781}
4782
4783static void netdev_init_queue_locks(struct net_device *dev)
4784{
David S. Millere8a04642008-07-17 00:34:19 -07004785 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4786 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004787}
4788
Herbert Xub63365a2008-10-23 01:11:29 -07004789unsigned long netdev_fix_features(unsigned long features, const char *name)
4790{
4791 /* Fix illegal SG+CSUM combinations. */
4792 if ((features & NETIF_F_SG) &&
4793 !(features & NETIF_F_ALL_CSUM)) {
4794 if (name)
4795 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4796 "checksum feature.\n", name);
4797 features &= ~NETIF_F_SG;
4798 }
4799
4800 /* TSO requires that SG is present as well. */
4801 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4802 if (name)
4803 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4804 "SG feature.\n", name);
4805 features &= ~NETIF_F_TSO;
4806 }
4807
4808 if (features & NETIF_F_UFO) {
4809 if (!(features & NETIF_F_GEN_CSUM)) {
4810 if (name)
4811 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4812 "since no NETIF_F_HW_CSUM feature.\n",
4813 name);
4814 features &= ~NETIF_F_UFO;
4815 }
4816
4817 if (!(features & NETIF_F_SG)) {
4818 if (name)
4819 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4820 "since no NETIF_F_SG feature.\n", name);
4821 features &= ~NETIF_F_UFO;
4822 }
4823 }
4824
4825 return features;
4826}
4827EXPORT_SYMBOL(netdev_fix_features);
4828
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829/**
4830 * register_netdevice - register a network device
4831 * @dev: device to register
4832 *
4833 * Take a completed network device structure and add it to the kernel
4834 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4835 * chain. 0 is returned on success. A negative errno code is returned
4836 * on a failure to set up the device, or if the name is a duplicate.
4837 *
4838 * Callers must hold the rtnl semaphore. You may want
4839 * register_netdev() instead of this.
4840 *
4841 * BUGS:
4842 * The locking appears insufficient to guarantee two parallel registers
4843 * will not get the same name.
4844 */
4845
4846int register_netdevice(struct net_device *dev)
4847{
4848 struct hlist_head *head;
4849 struct hlist_node *p;
4850 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004851 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004852
4853 BUG_ON(dev_boot_phase);
4854 ASSERT_RTNL();
4855
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004856 might_sleep();
4857
Linus Torvalds1da177e2005-04-16 15:20:36 -07004858 /* When net_device's are persistent, this will be fatal. */
4859 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004860 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004861
David S. Millerf1f28aa2008-07-15 00:08:33 -07004862 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004863 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004864 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004865
Linus Torvalds1da177e2005-04-16 15:20:36 -07004866 dev->iflink = -1;
4867
4868 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004869 if (dev->netdev_ops->ndo_init) {
4870 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004871 if (ret) {
4872 if (ret > 0)
4873 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004874 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004875 }
4876 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004877
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878 if (!dev_valid_name(dev->name)) {
4879 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004880 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004881 }
4882
Eric W. Biederman881d9662007-09-17 11:56:21 -07004883 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884 if (dev->iflink == -1)
4885 dev->iflink = dev->ifindex;
4886
4887 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004888 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004889 hlist_for_each(p, head) {
4890 struct net_device *d
4891 = hlist_entry(p, struct net_device, name_hlist);
4892 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4893 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004894 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004896 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004897
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004898 /* Fix illegal checksum combinations */
4899 if ((dev->features & NETIF_F_HW_CSUM) &&
4900 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4901 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4902 dev->name);
4903 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4904 }
4905
4906 if ((dev->features & NETIF_F_NO_CSUM) &&
4907 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4908 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4909 dev->name);
4910 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4911 }
4912
Herbert Xub63365a2008-10-23 01:11:29 -07004913 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004914
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004915 /* Enable software GSO if SG is supported. */
4916 if (dev->features & NETIF_F_SG)
4917 dev->features |= NETIF_F_GSO;
4918
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004919 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00004920
4921 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4922 ret = notifier_to_errno(ret);
4923 if (ret)
4924 goto err_uninit;
4925
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004926 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004927 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004928 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004929 dev->reg_state = NETREG_REGISTERED;
4930
Linus Torvalds1da177e2005-04-16 15:20:36 -07004931 /*
4932 * Default initial state at registry is that the
4933 * device is present.
4934 */
4935
4936 set_bit(__LINK_STATE_PRESENT, &dev->state);
4937
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004939 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004940 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004941
 4942 /* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004943 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004944 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004945 if (ret) {
4946 rollback_registered(dev);
4947 dev->reg_state = NETREG_UNREGISTERED;
4948 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004949
4950out:
4951 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004952
4953err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004954 if (dev->netdev_ops->ndo_uninit)
4955 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004956 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004957}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004958EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959
4960/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004961 * init_dummy_netdev - init a dummy network device for NAPI
4962 * @dev: device to init
4963 *
 4964 * This takes a network device structure and initializes the minimum
 4965 * number of fields so it can be used to schedule NAPI polls without
4966 * registering a full blown interface. This is to be used by drivers
4967 * that need to tie several hardware interfaces to a single NAPI
4968 * poll scheduler due to HW limitations.
4969 */
4970int init_dummy_netdev(struct net_device *dev)
4971{
4972 /* Clear everything. Note we don't initialize spinlocks
 4973 * as they aren't supposed to be taken by any of the
4974 * NAPI code and this dummy netdev is supposed to be
4975 * only ever used for NAPI polls
4976 */
4977 memset(dev, 0, sizeof(struct net_device));
4978
4979 /* make sure we BUG if trying to hit standard
4980 * register/unregister code path
4981 */
4982 dev->reg_state = NETREG_DUMMY;
4983
4984 /* initialize the ref count */
4985 atomic_set(&dev->refcnt, 1);
4986
4987 /* NAPI wants this */
4988 INIT_LIST_HEAD(&dev->napi_list);
4989
4990 /* a dummy interface is started by default */
4991 set_bit(__LINK_STATE_PRESENT, &dev->state);
4992 set_bit(__LINK_STATE_START, &dev->state);
4993
4994 return 0;
4995}
4996EXPORT_SYMBOL_GPL(init_dummy_netdev);
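/*
 * Illustrative sketch, not part of the original file: the usage
 * pattern init_dummy_netdev() exists for -- one piece of hardware,
 * several NAPI contexts, no extra registered interfaces.  The foo_*
 * names and the weight of 64 are assumptions.
 */
struct foo_adapter {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	/* ... service the hardware queue here ... */
	napi_complete(napi);
	return 0;
}

static void foo_setup_napi(struct foo_adapter *adapter)
{
	init_dummy_netdev(&adapter->napi_dev);
	netif_napi_add(&adapter->napi_dev, &adapter->napi, foo_poll, 64);
}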
4997
4998
4999/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005000 * register_netdev - register a network device
5001 * @dev: device to register
5002 *
5003 * Take a completed network device structure and add it to the kernel
5004 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5005 * chain. 0 is returned on success. A negative errno code is returned
5006 * on a failure to set up the device, or if the name is a duplicate.
5007 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005008 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009 * and expands the device name if you passed a format string to
5010 * alloc_netdev.
5011 */
5012int register_netdev(struct net_device *dev)
5013{
5014 int err;
5015
5016 rtnl_lock();
5017
5018 /*
5019 * If the name is a format string the caller wants us to do a
5020 * name allocation.
5021 */
5022 if (strchr(dev->name, '%')) {
5023 err = dev_alloc_name(dev, dev->name);
5024 if (err < 0)
5025 goto out;
5026 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005027
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028 err = register_netdevice(dev);
5029out:
5030 rtnl_unlock();
5031 return err;
5032}
5033EXPORT_SYMBOL(register_netdev);
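/*
 * Illustrative sketch, not part of the original file: the minimal
 * lifecycle around register_netdev(), including the "%d" name
 * expansion mentioned above.  foo_create/foo_destroy and the "foo%d"
 * template are assumptions.
 */
static struct net_device *foo_create(void)
{
	struct net_device *dev;

	dev = alloc_netdev(0, "foo%d", ether_setup);
	if (!dev)
		return NULL;
	if (register_netdev(dev)) {
		free_netdev(dev);	/* never registered: plain free */
		return NULL;
	}
	return dev;
}

static void foo_destroy(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}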
5034
5035/*
5036 * netdev_wait_allrefs - wait until all references are gone.
5037 *
5038 * This is called when unregistering network devices.
5039 *
5040 * Any protocol or device that holds a reference should register
5041 * for netdevice notification, and cleanup and put back the
5042 * reference if they receive an UNREGISTER event.
5043 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005044 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005045 */
5046static void netdev_wait_allrefs(struct net_device *dev)
5047{
5048 unsigned long rebroadcast_time, warning_time;
5049
5050 rebroadcast_time = warning_time = jiffies;
5051 while (atomic_read(&dev->refcnt) != 0) {
5052 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005053 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005054
5055 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005056 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057
5058 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5059 &dev->state)) {
5060 /* We must not have linkwatch events
5061 * pending on unregister. If this
5062 * happens, we simply run the queue
5063 * unscheduled, resulting in a noop
5064 * for this device.
5065 */
5066 linkwatch_run_queue();
5067 }
5068
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005069 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005070
5071 rebroadcast_time = jiffies;
5072 }
5073
5074 msleep(250);
5075
5076 if (time_after(jiffies, warning_time + 10 * HZ)) {
5077 printk(KERN_EMERG "unregister_netdevice: "
5078 "waiting for %s to become free. Usage "
5079 "count = %d\n",
5080 dev->name, atomic_read(&dev->refcnt));
5081 warning_time = jiffies;
5082 }
5083 }
5084}
5085
5086/* The sequence is:
5087 *
5088 * rtnl_lock();
5089 * ...
5090 * register_netdevice(x1);
5091 * register_netdevice(x2);
5092 * ...
5093 * unregister_netdevice(y1);
5094 * unregister_netdevice(y2);
5095 * ...
5096 * rtnl_unlock();
5097 * free_netdev(y1);
5098 * free_netdev(y2);
5099 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005100 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005101 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005102 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005103 * without deadlocking with linkwatch via keventd.
5104 * 2) Since we run with the RTNL semaphore not held, we can sleep
5105 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005106 *
5107 * We must not return until all unregister events added during
5108 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005109 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110void netdev_run_todo(void)
5111{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005112 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005113
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005115 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005116
5117 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005118
Linus Torvalds1da177e2005-04-16 15:20:36 -07005119 while (!list_empty(&list)) {
5120 struct net_device *dev
5121 = list_entry(list.next, struct net_device, todo_list);
5122 list_del(&dev->todo_list);
5123
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005124 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005125 printk(KERN_ERR "network todo '%s' but state %d\n",
5126 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005127 dump_stack();
5128 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005129 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005130
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005131 dev->reg_state = NETREG_UNREGISTERED;
5132
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005133 on_each_cpu(flush_backlog, dev, 1);
5134
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005135 netdev_wait_allrefs(dev);
5136
5137 /* paranoia */
5138 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005139 WARN_ON(dev->ip_ptr);
5140 WARN_ON(dev->ip6_ptr);
5141 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005142
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005143 if (dev->destructor)
5144 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005145
5146 /* Free network device */
5147 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005149}
5150
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005151/**
5152 * dev_get_stats - get network device statistics
5153 * @dev: device to get statistics from
5154 *
5155 * Get network statistics from device. The device driver may provide
 5156 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5157 * the internal statistics structure is used.
5158 */
5159const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005160{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005161 const struct net_device_ops *ops = dev->netdev_ops;
5162
5163 if (ops->ndo_get_stats)
5164 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005165 else {
5166 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5167 struct net_device_stats *stats = &dev->stats;
5168 unsigned int i;
5169 struct netdev_queue *txq;
5170
5171 for (i = 0; i < dev->num_tx_queues; i++) {
5172 txq = netdev_get_tx_queue(dev, i);
5173 tx_bytes += txq->tx_bytes;
5174 tx_packets += txq->tx_packets;
5175 tx_dropped += txq->tx_dropped;
5176 }
5177 if (tx_bytes || tx_packets || tx_dropped) {
5178 stats->tx_bytes = tx_bytes;
5179 stats->tx_packets = tx_packets;
5180 stats->tx_dropped = tx_dropped;
5181 }
5182 return stats;
5183 }
Rusty Russellc45d2862007-03-28 14:29:08 -07005184}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005185EXPORT_SYMBOL(dev_get_stats);
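/*
 * Illustrative sketch, not part of the original file: a driver taking
 * the first branch above by supplying ndo_get_stats, here folding a
 * hardware drop counter into dev->stats.  The foo_* names and the
 * stub counter read are assumptions.
 */
static unsigned long foo_read_hw_rx_drops(struct net_device *dev)
{
	return 0;	/* stands in for a real MMIO counter read */
}

static struct net_device_stats *foo_get_stats(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;

	stats->rx_dropped = foo_read_hw_rx_drops(dev);
	return stats;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_stats	= foo_get_stats,
};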
Rusty Russellc45d2862007-03-28 14:29:08 -07005186
David S. Millerdc2b4842008-07-08 17:18:23 -07005187static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005188 struct netdev_queue *queue,
5189 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005190{
David S. Millerdc2b4842008-07-08 17:18:23 -07005191 queue->dev = dev;
5192}
5193
David S. Millerbb949fb2008-07-08 16:55:56 -07005194static void netdev_init_queues(struct net_device *dev)
5195{
David S. Millere8a04642008-07-17 00:34:19 -07005196 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5197 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005198 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005199}
5200
Linus Torvalds1da177e2005-04-16 15:20:36 -07005201/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005202 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203 * @sizeof_priv: size of private data to allocate space for
5204 * @name: device name format string
5205 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005206 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207 *
5208 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005209 * and performs basic initialization. Also allocates subqueue structs
5210 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005212struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5213 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214{
David S. Millere8a04642008-07-17 00:34:19 -07005215 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005216 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005217 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005218 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005219
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005220 BUG_ON(strlen(name) >= sizeof(dev->name));
5221
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005222 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005223 if (sizeof_priv) {
5224 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005225 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005226 alloc_size += sizeof_priv;
5227 }
5228 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005229 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005231 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005232 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005233 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234 return NULL;
5235 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005236
Stephen Hemminger79439862008-07-21 13:28:44 -07005237 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005238 if (!tx) {
5239 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5240 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005241 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005242 }
5243
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005244 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005246
5247 if (dev_addr_init(dev))
5248 goto free_tx;
5249
Jiri Pirkoccffad252009-05-22 23:22:17 +00005250 dev_unicast_init(dev);
5251
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005252 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253
David S. Millere8a04642008-07-17 00:34:19 -07005254 dev->_tx = tx;
5255 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005256 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005257
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005258 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259
David S. Millerbb949fb2008-07-08 16:55:56 -07005260 netdev_init_queues(dev);
5261
Herbert Xud565b0a2008-12-15 23:38:52 -08005262 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005263 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005264 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005265 setup(dev);
5266 strcpy(dev->name, name);
5267 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005268
5269free_tx:
5270 kfree(tx);
5271
5272free_p:
5273 kfree(p);
5274 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005276EXPORT_SYMBOL(alloc_netdev_mq);
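/*
 * Illustrative sketch, not part of the original file: allocating an
 * eight-queue Ethernet device with a private area, as documented
 * above.  struct foo_mq_priv and the "foo%d" template are assumptions.
 */
struct foo_mq_priv {
	unsigned int magic;
};

static struct net_device *foo_alloc_mq(void)
{
	return alloc_netdev_mq(sizeof(struct foo_mq_priv), "foo%d",
			       ether_setup, 8);
}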
Linus Torvalds1da177e2005-04-16 15:20:36 -07005277
5278/**
5279 * free_netdev - free network device
5280 * @dev: device
5281 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005282 * This function does the last stage of destroying an allocated device
5283 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005284 * If this is the last reference then it will be freed.
5285 */
5286void free_netdev(struct net_device *dev)
5287{
Herbert Xud565b0a2008-12-15 23:38:52 -08005288 struct napi_struct *p, *n;
5289
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005290 release_net(dev_net(dev));
5291
David S. Millere8a04642008-07-17 00:34:19 -07005292 kfree(dev->_tx);
5293
Jiri Pirkof001fde2009-05-05 02:48:28 +00005294 /* Flush device addresses */
5295 dev_addr_flush(dev);
5296
Herbert Xud565b0a2008-12-15 23:38:52 -08005297 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5298 netif_napi_del(p);
5299
Stephen Hemminger3041a062006-05-26 13:25:24 -07005300 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005301 if (dev->reg_state == NETREG_UNINITIALIZED) {
5302 kfree((char *)dev - dev->padded);
5303 return;
5304 }
5305
5306 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5307 dev->reg_state = NETREG_RELEASED;
5308
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005309 /* will free via device release */
5310 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005311}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005312EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005313
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005314/**
5315 * synchronize_net - Synchronize with packet receive processing
5316 *
5317 * Wait for packets currently being received to be done.
5318 * Does not block later packets from starting.
5319 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005320void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005321{
5322 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005323 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005324}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005325EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005326
5327/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005328 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005329 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005330 * @head: list
 5331 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005332 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005333 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005334 * If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005335 *
5336 * Callers must hold the rtnl semaphore. You may want
5337 * unregister_netdev() instead of this.
5338 */
5339
Eric Dumazet44a08732009-10-27 07:03:04 +00005340void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005341{
Herbert Xua6620712007-12-12 19:21:56 -08005342 ASSERT_RTNL();
5343
Eric Dumazet44a08732009-10-27 07:03:04 +00005344 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005345 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005346 } else {
5347 rollback_registered(dev);
5348 /* Finish processing unregister after unlock */
5349 net_set_todo(dev);
5350 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005351}
Eric Dumazet44a08732009-10-27 07:03:04 +00005352EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005353
5354/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005355 * unregister_netdevice_many - unregister many devices
5356 * @head: list of devices
5357 *
5358 */
5359void unregister_netdevice_many(struct list_head *head)
5360{
5361 struct net_device *dev;
5362
5363 if (!list_empty(head)) {
5364 rollback_registered_many(head);
5365 list_for_each_entry(dev, head, unreg_list)
5366 net_set_todo(dev);
5367 }
5368}
Eric Dumazet63c80992009-10-27 07:06:49 +00005369EXPORT_SYMBOL(unregister_netdevice_many);
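/*
 * Illustrative sketch, not part of the original file: batching
 * unregistrations so the synchronize_net() calls in
 * rollback_registered_many() are paid once per batch instead of once
 * per device.  foo_destroy_all and its arguments are assumptions.
 */
static void foo_destroy_all(struct net_device **devs, int n)
{
	LIST_HEAD(head);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &head);
	unregister_netdevice_many(&head);
	rtnl_unlock();
}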
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005370
5371/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005372 * unregister_netdev - remove device from the kernel
5373 * @dev: device
5374 *
5375 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005376 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005377 *
5378 * This is just a wrapper for unregister_netdevice that takes
5379 * the rtnl semaphore. In general you want to use this and not
5380 * unregister_netdevice.
5381 */
5382void unregister_netdev(struct net_device *dev)
5383{
5384 rtnl_lock();
5385 unregister_netdevice(dev);
5386 rtnl_unlock();
5387}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005388EXPORT_SYMBOL(unregister_netdev);
5389
Eric W. Biedermance286d32007-09-12 13:53:49 +02005390/**
 5391 * dev_change_net_namespace - move device to a different network namespace
5392 * @dev: device
5393 * @net: network namespace
5394 * @pat: If not NULL name pattern to try if the current device name
5395 * is already taken in the destination network namespace.
5396 *
5397 * This function shuts down a device interface and moves it
5398 * to a new network namespace. On success 0 is returned, on
 5399 * a failure a negative errno code is returned.
5400 *
5401 * Callers must hold the rtnl semaphore.
5402 */
5403
5404int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5405{
5406 char buf[IFNAMSIZ];
5407 const char *destname;
5408 int err;
5409
5410 ASSERT_RTNL();
5411
5412 /* Don't allow namespace local devices to be moved. */
5413 err = -EINVAL;
5414 if (dev->features & NETIF_F_NETNS_LOCAL)
5415 goto out;
5416
Eric W. Biederman38918452008-10-27 17:51:47 -07005417#ifdef CONFIG_SYSFS
5418 /* Don't allow real devices to be moved when sysfs
5419 * is enabled.
5420 */
5421 err = -EINVAL;
5422 if (dev->dev.parent)
5423 goto out;
5424#endif
5425
Eric W. Biedermance286d32007-09-12 13:53:49 +02005426 /* Ensure the device has been registered */
5427 err = -EINVAL;
5428 if (dev->reg_state != NETREG_REGISTERED)
5429 goto out;
5430
 5431 /* Get out if there is nothing to do */
5432 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005433 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005434 goto out;
5435
5436 /* Pick the destination device name, and ensure
5437 * we can use it in the destination network namespace.
5438 */
5439 err = -EEXIST;
5440 destname = dev->name;
5441 if (__dev_get_by_name(net, destname)) {
5442 /* We get here if we can't use the current device name */
5443 if (!pat)
5444 goto out;
5445 if (!dev_valid_name(pat))
5446 goto out;
5447 if (strchr(pat, '%')) {
5448 if (__dev_alloc_name(net, pat, buf) < 0)
5449 goto out;
5450 destname = buf;
5451 } else
5452 destname = pat;
5453 if (__dev_get_by_name(net, destname))
5454 goto out;
5455 }
5456
5457 /*
 5458 * And now a mini version of register_netdevice and unregister_netdevice.
5459 */
5460
5461 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005462 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005463
5464 /* And unlink it from device chain */
5465 err = -ENODEV;
5466 unlist_netdevice(dev);
5467
5468 synchronize_net();
5469
5470 /* Shutdown queueing discipline. */
5471 dev_shutdown(dev);
5472
5473 /* Notify protocols, that we are about to destroy
5474 this device. They should clean all the things.
5475 */
5476 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5477
5478 /*
5479 * Flush the unicast and multicast chains
5480 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00005481 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005482 dev_addr_discard(dev);
5483
Eric W. Biederman38918452008-10-27 17:51:47 -07005484 netdev_unregister_kobject(dev);
5485
Eric W. Biedermance286d32007-09-12 13:53:49 +02005486 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005487 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005488
5489 /* Assign the new device name */
5490 if (destname != dev->name)
5491 strcpy(dev->name, destname);
5492
5493 /* If there is an ifindex conflict assign a new one */
5494 if (__dev_get_by_index(net, dev->ifindex)) {
5495 int iflink = (dev->iflink == dev->ifindex);
5496 dev->ifindex = dev_new_index(net);
5497 if (iflink)
5498 dev->iflink = dev->ifindex;
5499 }
5500
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005501 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005502 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005503 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005504
5505 /* Add the device back in the hashes */
5506 list_netdevice(dev);
5507
5508 /* Notify protocols, that a new device appeared. */
5509 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5510
5511 synchronize_net();
5512 err = 0;
5513out:
5514 return err;
5515}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

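/*
 * Usage sketch (illustrative only; "dev" and "newnet" are hypothetical):
 * callers must already hold RTNL.  A '%' in the pattern lets
 * __dev_alloc_name() pick a free suffix when the current name is taken
 * in the destination namespace:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, newnet, "eth%d");
 *	rtnl_unlock();
 *	if (err)
 *		return err;	(negative errno, e.g. -EEXIST or -EINVAL)
 */
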
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct Qdisc **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}

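/*
 * The callback above is wired up in net_dev_init() below via
 * hotcpu_notifier(dev_cpu_callback, 0), so when a CPU goes offline its
 * pending completion_queue, output_queue and input_pkt_queue work is
 * spliced onto a surviving CPU's softnet_data and reprocessed there.
 */
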
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

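/*
 * Usage sketch (illustrative; for_each_slave() is a hypothetical
 * stand-in for a master driver's own slave iterator): an aggregating
 * driver such as bonding folds each slave's feature set into the
 * master's, one slave at a time:
 *
 *	unsigned long features = master->features & ~NETIF_F_ONE_FOR_ALL;
 *
 *	for_each_slave(master, slave)
 *		features = netdev_increment_features(features,
 *						     slave->features,
 *						     NETIF_F_ONE_FOR_ALL);
 *	master->features = features;
 */
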
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *	@buffer: buffer for resulting name
 *	@len: size of buffer
 *
 *	Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}

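/*
 * Usage sketch (illustrative): callers supply a stack buffer and always
 * get a valid string back, even when no parent driver is bound -- the
 * TX watchdog, for instance, uses it roughly like:
 *
 *	char drivername[64];
 *
 *	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */
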
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
restart:
	for_each_netdev(net, dev) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Delete virtual devices */
		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
			dev->rtnl_link_ops->dellink(dev, NULL);
			goto restart;
		}

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
		goto restart;
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
		queue->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too.  Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);
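
/*
 * Sketch (illustrative, simplified from skb_tx_hash() elsewhere in this
 * file): skb_tx_hashrnd seeds the flow hash that spreads transmits
 * across a multiqueue device, roughly:
 *
 *	hash = jhash_1word(hash, skb_tx_hashrnd);
 *	queue_index = ((u64)hash * dev->real_num_tx_queues) >> 32;
 *
 * Running as a late initcall means the random pool is better seeded by
 * the time the value is drawn.
 */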