/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
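
/*
 * Illustrative arithmetic (not part of the original source): dev_add_pack()
 * below buckets handlers by the low nibble of the host-order protocol value,
 * e.g.
 *
 *	ntohs(htons(ETH_P_IP))   & PTYPE_HASH_MASK  ==  0x0800 & 0xf  ==  0x0
 *	ntohs(htons(ETH_P_ARP))  & PTYPE_HASH_MASK  ==  0x0806 & 0xf  ==  0x6
 *	ntohs(htons(ETH_P_RARP)) & PTYPE_HASH_MASK  ==  0x8035 & 0xf  ==  0x5
 *
 * ETH_P_ALL handlers are special-cased onto the ptype_all (taps) list and
 * never enter ptype_base.
 */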

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
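
/*
 * A minimal sketch (illustrative only, not part of the original source) of
 * the pure-reader pattern described above, with do_something() standing in
 * for arbitrary read-only work:
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		do_something(dev);
 *	read_unlock(&dev_base_lock);
 *
 * Writers instead take the rtnl semaphore and then dev_base_lock for
 * writing, as list_netdevice()/unlist_netdevice() below do.
 */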

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
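
/*
 * Illustrative note (not part of the original source): the per-type lockdep
 * classes above matter when devices of different ARPHRD types are stacked.
 * For example, if a tunnel device (class "_xmit_IPGRE") transmits and the
 * resulting packet is then sent out an Ethernet device (class "_xmit_ETHER"),
 * both _xmit locks may be held at once; distinct classes keyed on dev->type
 * keep lockdep from reporting that nesting as recursive locking of a single
 * class.
 */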

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
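
/*
 * Minimal usage sketch (illustrative only; my_proto_rcv() and the 0x88b5
 * ethertype are made-up examples, not part of this file).  A protocol
 * registers a handler and later tears it down:
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = cpu_to_be16(0x88b5),
 *		.func = my_proto_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);	hashes into bucket 0x88b5 & 0xf == 5
 *	...
 *	dev_remove_pack(&my_ptype);	sleeps in synchronize_net()
 */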

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
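
/*
 * Illustrative only (not part of the original source): as parsed by
 * get_options() above, the boot parameter takes up to four integers
 * followed by the device name, e.g. something like
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * which records irq 5 and base address 0x340 for "eth0" so that
 * netdev_boot_setup_check() can apply them during probing.
 */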

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
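
/*
 * Illustrative note (not part of the original source): the three name
 * lookups above differ only in their locking contract.  A hedged sketch of
 * the RCU variant, where no reference is taken and the pointer is only
 * valid inside the read-side critical section (use_dev() is a hypothetical
 * stand-in):
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		use_dev(dev);
 *	rcu_read_unlock();
 *
 * dev_get_by_name() instead takes a reference with dev_hold(), which the
 * caller must drop with dev_put(); __dev_get_by_name() relies on the RTNL
 * semaphore or dev_base_lock being held.
 */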

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);
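
/*
 * Illustrative examples (not part of the original source): dev_valid_name()
 * accepts names such as "eth0" or "wlan-ap", but rejects "" (empty), "."
 * and ".." (reserved path components), "my/dev" (contains '/'), "eth 0"
 * (contains whitespace) and any name of IFNAMSIZ characters or more.
 */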

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
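
/*
 * Illustrative example (not part of the original source): with "eth0" and
 * "eth1" already registered in the namespace, a driver calling
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *
 * gets err == 2 and dev->name set to "eth2".  A literal name containing no
 * '%' is simply checked for uniqueness and used as-is.
 */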


/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	} else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
EXPORT_SYMBOL(dev_open);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death, while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352
1353/**
1354 * unregister_netdevice_notifier - unregister a network notifier block
1355 * @nb: notifier
1356 *
1357 * Unregister a notifier previously registered by
1358 * register_netdevice_notifier(). The notifier is unlinked from the
1359 * kernel structures and may then be reused. A negative errno code
1360 * is returned on a failure.
1361 */
1362
1363int unregister_netdevice_notifier(struct notifier_block *nb)
1364{
Herbert Xu9f514952006-03-25 01:24:25 -08001365 int err;
1366
1367 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001368 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001369 rtnl_unlock();
1370 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001372EXPORT_SYMBOL(unregister_netdevice_notifier);
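/*
 * Illustrative sketch, not part of dev.c: a minimal notifier block as a
 * module might register it.  example_netdev_event()/example_nb are
 * hypothetical names; note that at registration time the NETDEV_REGISTER
 * and NETDEV_UP events are replayed for existing devices, as described
 * above.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_REGISTER:
		printk(KERN_DEBUG "example: %s registered\n", dev->name);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

/* A module would call register_netdevice_notifier(&example_nb) from its
 * init function and unregister_netdevice_notifier(&example_nb) on exit. */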
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373
1374/**
1375 * call_netdevice_notifiers - call all network notifier blocks
1376 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001377 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 *
1379 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001380 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 */
1382
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001383int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001385 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386}
1387
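/*
 * Illustrative sketch, not part of dev.c: raising an event through
 * call_netdevice_notifiers().  The wrapper below is hypothetical; core
 * helpers such as dev_set_mtu() do essentially this after updating the
 * relevant field.
 */
static void example_announce_mtu_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
}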
1388/* When > 0 there are consumers of rx skb time stamps */
1389static atomic_t netstamp_needed = ATOMIC_INIT(0);
1390
1391void net_enable_timestamp(void)
1392{
1393 atomic_inc(&netstamp_needed);
1394}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001395EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396
1397void net_disable_timestamp(void)
1398{
1399 atomic_dec(&netstamp_needed);
1400}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001401EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001403static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404{
1405 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001406 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001407 else
1408 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409}
1410
1411/*
1412 * Support routine. Sends outgoing frames to any network
1413 * taps currently in use.
1414 */
1415
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001416static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417{
1418 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001419
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001420#ifdef CONFIG_NET_CLS_ACT
1421 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1422 net_timestamp(skb);
1423#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001424 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001425#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
1427 rcu_read_lock();
1428 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1429 /* Never send packets back to the socket
1430 * they originated from - MvS (miquels@drinkel.ow.org)
1431 */
1432 if ((ptype->dev == dev || !ptype->dev) &&
1433 (ptype->af_packet_priv == NULL ||
1434 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001435 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 if (!skb2)
1437 break;
1438
1439 /* skb->nh should be correctly
1440 set by the sender, so that the second statement is
1441 just protection against buggy protocols.
1442 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001443 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001445 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001446 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 if (net_ratelimit())
1448 printk(KERN_CRIT "protocol %04x is "
1449 "buggy, dev %s\n",
1450 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001451 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 }
1453
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001454 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001456 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 }
1458 }
1459 rcu_read_unlock();
1460}
1461
Denis Vlasenko56079432006-03-29 15:57:29 -08001462
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001463static inline void __netif_reschedule(struct Qdisc *q)
1464{
1465 struct softnet_data *sd;
1466 unsigned long flags;
1467
1468 local_irq_save(flags);
1469 sd = &__get_cpu_var(softnet_data);
1470 q->next_sched = sd->output_queue;
1471 sd->output_queue = q;
1472 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1473 local_irq_restore(flags);
1474}
1475
David S. Miller37437bb2008-07-16 02:15:04 -07001476void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001477{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001478 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1479 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001480}
1481EXPORT_SYMBOL(__netif_schedule);
1482
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001483void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001484{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001485 if (atomic_dec_and_test(&skb->users)) {
1486 struct softnet_data *sd;
1487 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001488
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001489 local_irq_save(flags);
1490 sd = &__get_cpu_var(softnet_data);
1491 skb->next = sd->completion_queue;
1492 sd->completion_queue = skb;
1493 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1494 local_irq_restore(flags);
1495 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001496}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001497EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001498
1499void dev_kfree_skb_any(struct sk_buff *skb)
1500{
1501 if (in_irq() || irqs_disabled())
1502 dev_kfree_skb_irq(skb);
1503 else
1504 dev_kfree_skb(skb);
1505}
1506EXPORT_SYMBOL(dev_kfree_skb_any);
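/*
 * Illustrative sketch, not part of dev.c: freeing a transmitted skb from a
 * TX-completion path that may run in hard-IRQ context.  The helper name is
 * hypothetical.
 */
static void example_tx_complete(struct sk_buff *skb)
{
	/* Safe in any context: defers to the completion queue when needed. */
	dev_kfree_skb_any(skb);
}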
1507
1508
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001509/**
1510 * netif_device_detach - mark device as removed
1511 * @dev: network device
1512 *
1513 * Mark device as removed from system and therefore no longer available.
1514 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001515void netif_device_detach(struct net_device *dev)
1516{
1517 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1518 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001519 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001520 }
1521}
1522EXPORT_SYMBOL(netif_device_detach);
1523
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001524/**
1525 * netif_device_attach - mark device as attached
1526 * @dev: network device
1527 *
1528 * Mark device as attached to the system and restart if needed.
1529 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001530void netif_device_attach(struct net_device *dev)
1531{
1532 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1533 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001534 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001535 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001536 }
1537}
1538EXPORT_SYMBOL(netif_device_attach);
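/*
 * Illustrative sketch, not part of dev.c: the usual pairing of
 * netif_device_detach()/netif_device_attach() in a driver's suspend and
 * resume paths.  The function names and the elided hardware steps are
 * hypothetical.
 */
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* clear __LINK_STATE_PRESENT, stop queues */
	/* ... put the hardware to sleep here ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... reinitialize the hardware here ... */
	netif_device_attach(dev);	/* set __LINK_STATE_PRESENT, wake queues */
	return 0;
}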
1539
Ben Hutchings6de329e2008-06-16 17:02:28 -07001540static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1541{
1542 return ((features & NETIF_F_GEN_CSUM) ||
1543 ((features & NETIF_F_IP_CSUM) &&
1544 protocol == htons(ETH_P_IP)) ||
1545 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001546 protocol == htons(ETH_P_IPV6)) ||
1547 ((features & NETIF_F_FCOE_CRC) &&
1548 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001549}
1550
1551static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1552{
1553 if (can_checksum_protocol(dev->features, skb->protocol))
1554 return true;
1555
1556 if (skb->protocol == htons(ETH_P_8021Q)) {
1557 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1558 if (can_checksum_protocol(dev->features & dev->vlan_features,
1559 veh->h_vlan_encapsulated_proto))
1560 return true;
1561 }
1562
1563 return false;
1564}
Denis Vlasenko56079432006-03-29 15:57:29 -08001565
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566/*
1567 * Invalidate hardware checksum when packet is to be mangled, and
1568 * complete checksum manually on outgoing path.
1569 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001570int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571{
Al Virod3bc23e2006-11-14 21:24:49 -08001572 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001573 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574
Patrick McHardy84fa7932006-08-29 16:44:56 -07001575 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001576 goto out_set_summed;
1577
1578 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001579 /* Let GSO fix up the checksum. */
1580 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 }
1582
Herbert Xua0308472007-10-15 01:47:15 -07001583 offset = skb->csum_start - skb_headroom(skb);
1584 BUG_ON(offset >= skb_headlen(skb));
1585 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1586
1587 offset += skb->csum_offset;
1588 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1589
1590 if (skb_cloned(skb) &&
1591 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1593 if (ret)
1594 goto out;
1595 }
1596
Herbert Xua0308472007-10-15 01:47:15 -07001597 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001598out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001600out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 return ret;
1602}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001603EXPORT_SYMBOL(skb_checksum_help);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001605/**
1606 * skb_gso_segment - Perform segmentation on skb.
1607 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001608 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001609 *
1610 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001611 *
1612 * It may return NULL if the skb requires no segmentation. This is
1613 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001614 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001615struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001616{
1617 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1618 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001619 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001620 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001621
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001622 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001623 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001624 __skb_pull(skb, skb->mac_len);
1625
Herbert Xu67fd1a72009-01-19 16:26:44 -08001626 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1627 struct net_device *dev = skb->dev;
1628 struct ethtool_drvinfo info = {};
1629
1630 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1631 dev->ethtool_ops->get_drvinfo(dev, &info);
1632
1633 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1634 "ip_summed=%d",
1635 info.driver, dev ? dev->features : 0L,
1636 skb->sk ? skb->sk->sk_route_caps : 0L,
1637 skb->len, skb->data_len, skb->ip_summed);
1638
Herbert Xua430a432006-07-08 13:34:56 -07001639 if (skb_header_cloned(skb) &&
1640 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1641 return ERR_PTR(err);
1642 }
1643
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001644 rcu_read_lock();
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08001645 list_for_each_entry_rcu(ptype,
1646 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001647 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001648 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001649 err = ptype->gso_send_check(skb);
1650 segs = ERR_PTR(err);
1651 if (err || skb_gso_ok(skb, features))
1652 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001653 __skb_push(skb, (skb->data -
1654 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001655 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001656 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001657 break;
1658 }
1659 }
1660 rcu_read_unlock();
1661
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001662 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001663
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001664 return segs;
1665}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001666EXPORT_SYMBOL(skb_gso_segment);
1667
Herbert Xufb286bb2005-11-10 13:01:24 -08001668/* Take action when hardware reception checksum errors are detected. */
1669#ifdef CONFIG_BUG
1670void netdev_rx_csum_fault(struct net_device *dev)
1671{
1672 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001673 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001674 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001675 dump_stack();
1676 }
1677}
1678EXPORT_SYMBOL(netdev_rx_csum_fault);
1679#endif
1680
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681/* Actually, we should eliminate this check as soon as we know that:
1682 * 1. IOMMU is present and can map all the memory.
1683 * 2. No high memory really exists on this machine.
1684 */
1685
1686static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1687{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001688#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 int i;
1690
1691 if (dev->features & NETIF_F_HIGHDMA)
1692 return 0;
1693
1694 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1695 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1696 return 1;
1697
Herbert Xu3d3a8532006-06-27 13:33:10 -07001698#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 return 0;
1700}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001702struct dev_gso_cb {
1703 void (*destructor)(struct sk_buff *skb);
1704};
1705
1706#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1707
1708static void dev_gso_skb_destructor(struct sk_buff *skb)
1709{
1710 struct dev_gso_cb *cb;
1711
1712 do {
1713 struct sk_buff *nskb = skb->next;
1714
1715 skb->next = nskb->next;
1716 nskb->next = NULL;
1717 kfree_skb(nskb);
1718 } while (skb->next);
1719
1720 cb = DEV_GSO_CB(skb);
1721 if (cb->destructor)
1722 cb->destructor(skb);
1723}
1724
1725/**
1726 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1727 * @skb: buffer to segment
1728 *
1729 * This function segments the given skb and stores the list of segments
1730 * in skb->next.
1731 */
1732static int dev_gso_segment(struct sk_buff *skb)
1733{
1734 struct net_device *dev = skb->dev;
1735 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001736 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1737 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001738
Herbert Xu576a30e2006-06-27 13:22:38 -07001739 segs = skb_gso_segment(skb, features);
1740
1741 /* Verifying header integrity only. */
1742 if (!segs)
1743 return 0;
1744
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001745 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001746 return PTR_ERR(segs);
1747
1748 skb->next = segs;
1749 DEV_GSO_CB(skb)->destructor = skb->destructor;
1750 skb->destructor = dev_gso_skb_destructor;
1751
1752 return 0;
1753}
1754
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001755int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1756 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001757{
Stephen Hemminger00829822008-11-20 20:14:53 -08001758 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001759 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001760
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001761 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001762 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001763 dev_queue_xmit_nit(skb, dev);
1764
Herbert Xu576a30e2006-06-27 13:22:38 -07001765 if (netif_needs_gso(dev, skb)) {
1766 if (unlikely(dev_gso_segment(skb)))
1767 goto out_kfree_skb;
1768 if (skb->next)
1769 goto gso;
1770 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001771
Eric Dumazet93f154b2009-05-18 22:19:19 -07001772 /*
1773 * If the device doesn't need skb->dst, release it right now while
1774 * it's hot in this CPU's cache
1775 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001776 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1777 skb_dst_drop(skb);
1778
Patrick Ohlyac45f602009-02-12 05:03:37 +00001779 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001780 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001781 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001782 /*
1783 * TODO: if skb_orphan() was called by
1784 * dev->hard_start_xmit() (for example, the unmodified
1785 * igb driver does that; bnx2 doesn't), then
1786 * skb_tx_software_timestamp() will be unable to send
1787 * back the time stamp.
1788 *
1789 * How can this be prevented? Always create another
1790 * reference to the socket before calling
1791 * dev->hard_start_xmit()? Prevent that skb_orphan()
1792 * does anything in dev->hard_start_xmit() by clearing
1793 * the skb destructor before the call and restoring it
1794 * afterwards, then doing the skb_orphan() ourselves?
1795 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001796 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001797 }
1798
Herbert Xu576a30e2006-06-27 13:22:38 -07001799gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001800 do {
1801 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001802
1803 skb->next = nskb->next;
1804 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001805 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001806 if (unlikely(rc != NETDEV_TX_OK)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001807 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001808 skb->next = nskb;
1809 return rc;
1810 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001811 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001812 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001813 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001814 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001815
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001816 skb->destructor = DEV_GSO_CB(skb)->destructor;
1817
1818out_kfree_skb:
1819 kfree_skb(skb);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001820 return NETDEV_TX_OK;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001821}
1822
David S. Miller70192982009-01-27 16:34:47 -08001823static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001824
Stephen Hemminger92477442009-03-21 13:39:26 -07001825u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001826{
David S. Miller70192982009-01-27 16:34:47 -08001827 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001828
David S. Miller513de112009-05-03 14:43:10 -07001829 if (skb_rx_queue_recorded(skb)) {
1830 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001831 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001832 hash -= dev->real_num_tx_queues;
1833 return hash;
1834 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001835
1836 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001837 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001838 else
David S. Miller70192982009-01-27 16:34:47 -08001839 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001840
David S. Miller70192982009-01-27 16:34:47 -08001841 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001842
David S. Millerb6b2fed2008-07-21 09:48:06 -07001843 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001844}
Stephen Hemminger92477442009-03-21 13:39:26 -07001845EXPORT_SYMBOL(skb_tx_hash);
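/*
 * Illustrative sketch, not part of dev.c: a multiqueue driver's
 * ndo_select_queue() that simply defers to skb_tx_hash().  The function
 * name is hypothetical; the signature matches the net_device_ops callback
 * used by dev_pick_tx() below.
 */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);
}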
David S. Miller8f0f2222008-07-15 03:47:03 -07001846
David S. Millere8a04642008-07-17 00:34:19 -07001847static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1848 struct sk_buff *skb)
1849{
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001850 u16 queue_index;
1851 struct sock *sk = skb->sk;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001852
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001853 if (sk_tx_queue_recorded(sk)) {
1854 queue_index = sk_tx_queue_get(sk);
1855 } else {
1856 const struct net_device_ops *ops = dev->netdev_ops;
1857
1858 if (ops->ndo_select_queue) {
1859 queue_index = ops->ndo_select_queue(dev, skb);
1860 } else {
1861 queue_index = 0;
1862 if (dev->real_num_tx_queues > 1)
1863 queue_index = skb_tx_hash(dev, skb);
1864
1865 if (sk && sk->sk_dst_cache)
1866 sk_tx_queue_set(sk, queue_index);
1867 }
1868 }
David S. Millereae792b2008-07-15 03:03:33 -07001869
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001870 skb_set_queue_mapping(skb, queue_index);
1871 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001872}
1873
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001874static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1875 struct net_device *dev,
1876 struct netdev_queue *txq)
1877{
1878 spinlock_t *root_lock = qdisc_lock(q);
1879 int rc;
1880
1881 spin_lock(root_lock);
1882 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1883 kfree_skb(skb);
1884 rc = NET_XMIT_DROP;
1885 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1886 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1887 /*
1888 * This is a work-conserving queue; there are no old skbs
1889 * waiting to be sent out; and the qdisc is not running -
1890 * xmit the skb directly.
1891 */
1892 __qdisc_update_bstats(q, skb->len);
1893 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1894 __qdisc_run(q);
1895 else
1896 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1897
1898 rc = NET_XMIT_SUCCESS;
1899 } else {
1900 rc = qdisc_enqueue_root(skb, q);
1901 qdisc_run(q);
1902 }
1903 spin_unlock(root_lock);
1904
1905 return rc;
1906}
1907
Dave Jonesd29f7492008-07-22 14:09:06 -07001908/**
1909 * dev_queue_xmit - transmit a buffer
1910 * @skb: buffer to transmit
1911 *
1912 * Queue a buffer for transmission to a network device. The caller must
1913 * have set the device and priority and built the buffer before calling
1914 * this function. The function can be called from an interrupt.
1915 *
1916 * A negative errno code is returned on a failure. A success does not
1917 * guarantee the frame will be transmitted as it may be dropped due
1918 * to congestion or traffic shaping.
1919 *
1920 * -----------------------------------------------------------------------------------
1921 * I notice this method can also return errors from the queue disciplines,
1922 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1923 * be positive.
1924 *
1925 * Regardless of the return value, the skb is consumed, so it is currently
1926 * difficult to retry a send to this method. (You can bump the ref count
1927 * before sending to hold a reference for retry if you are careful.)
1928 *
1929 * When calling this method, interrupts MUST be enabled. This is because
1930 * the BH enable code must have IRQs enabled so that it will not deadlock.
1931 * --BLG
1932 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933int dev_queue_xmit(struct sk_buff *skb)
1934{
1935 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001936 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 struct Qdisc *q;
1938 int rc = -ENOMEM;
1939
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001940 /* GSO will handle the following emulations directly. */
1941 if (netif_needs_gso(dev, skb))
1942 goto gso;
1943
David S. Miller4cf704f2009-06-09 00:18:51 -07001944 if (skb_has_frags(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001946 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 goto out_kfree_skb;
1948
1949 /* Fragmented skb is linearized if the device does not support SG,
1950 * or if at least one of the fragments is in highmem and the device
1951 * does not support DMA from it.
1952 */
1953 if (skb_shinfo(skb)->nr_frags &&
1954 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001955 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 goto out_kfree_skb;
1957
1958 /* If packet is not checksummed and device does not support
1959 * checksumming for this protocol, complete checksumming here.
1960 */
Herbert Xu663ead32007-04-09 11:59:07 -07001961 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1962 skb_set_transport_header(skb, skb->csum_start -
1963 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001964 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1965 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001966 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001968gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001969 /* Disable soft irqs for various locks below. Also
1970 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001972 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
David S. Millereae792b2008-07-15 03:03:33 -07001974 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001975 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001976
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001978 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979#endif
1980 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001981 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07001982 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 }
1984
1985 /* The device has no queue. Common case for software devices:
1986 loopback, all the sorts of tunnels...
1987
Herbert Xu932ff272006-06-09 12:20:56 -07001988 Really, it is unlikely that netif_tx_lock protection is necessary
1989 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 counters.)
1991 However, it is possible that they rely on the protection
1992 made by us here.
1993
1994 Check this and take the lock. It is not prone to deadlocks.
1995 Either way, the noqueue qdisc case is even simpler 8)
1996 */
1997 if (dev->flags & IFF_UP) {
1998 int cpu = smp_processor_id(); /* ok because BHs are off */
1999
David S. Millerc773e842008-07-08 23:13:53 -07002000 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
David S. Millerc773e842008-07-08 23:13:53 -07002002 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002004 if (!netif_tx_queue_stopped(txq)) {
Krishna Kumar03a9a442009-08-29 20:21:36 +00002005 rc = NET_XMIT_SUCCESS;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002006 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07002007 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 goto out;
2009 }
2010 }
David S. Millerc773e842008-07-08 23:13:53 -07002011 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 if (net_ratelimit())
2013 printk(KERN_CRIT "Virtual device %s asks to "
2014 "queue packet!\n", dev->name);
2015 } else {
2016 /* Recursion is detected! It is possible,
2017 * unfortunately */
2018 if (net_ratelimit())
2019 printk(KERN_CRIT "Dead loop on virtual device "
2020 "%s, fix it urgently!\n", dev->name);
2021 }
2022 }
2023
2024 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07002025 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
2027out_kfree_skb:
2028 kfree_skb(skb);
2029 return rc;
2030out:
Herbert Xud4828d82006-06-22 02:28:18 -07002031 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 return rc;
2033}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002034EXPORT_SYMBOL(dev_queue_xmit);
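/*
 * Illustrative sketch, not part of dev.c: queueing a pre-built frame with
 * dev_queue_xmit().  The helper name, protocol value and payload handling
 * are hypothetical; a real caller must also build a link-layer header
 * appropriate for the device.  Assumes <linux/if_ether.h> for ETH_P_IP.
 */
static int example_xmit_frame(struct net_device *dev, const void *payload,
			      unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), payload, len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/* The skb is consumed regardless of the return value. */
	return dev_queue_xmit(skb);
}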
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
2036
2037/*=======================================================================
2038 Receiver routines
2039 =======================================================================*/
2040
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07002041int netdev_max_backlog __read_mostly = 1000;
2042int netdev_budget __read_mostly = 300;
2043int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
2045DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2046
2047
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048/**
2049 * netif_rx - post buffer to the network code
2050 * @skb: buffer to post
2051 *
2052 * This function receives a packet from a device driver and queues it for
2053 * the upper (protocol) levels to process. It always succeeds. The buffer
2054 * may be dropped during processing for congestion control or by the
2055 * protocol layers.
2056 *
2057 * return values:
2058 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 * NET_RX_DROP (packet was dropped)
2060 *
2061 */
2062
2063int netif_rx(struct sk_buff *skb)
2064{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 struct softnet_data *queue;
2066 unsigned long flags;
2067
2068 /* if netpoll wants it, pretend we never saw it */
2069 if (netpoll_rx(skb))
2070 return NET_RX_DROP;
2071
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002072 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002073 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074
2075 /*
2076 * The code is rearranged so that the path is the shortest
2077 * when the CPU is congested but is still operating.
2078 */
2079 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 queue = &__get_cpu_var(softnet_data);
2081
2082 __get_cpu_var(netdev_rx_stat).total++;
2083 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2084 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07002088 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 }
2090
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002091 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 goto enqueue;
2093 }
2094
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 __get_cpu_var(netdev_rx_stat).dropped++;
2096 local_irq_restore(flags);
2097
2098 kfree_skb(skb);
2099 return NET_RX_DROP;
2100}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002101EXPORT_SYMBOL(netif_rx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
2103int netif_rx_ni(struct sk_buff *skb)
2104{
2105 int err;
2106
2107 preempt_disable();
2108 err = netif_rx(skb);
2109 if (local_softirq_pending())
2110 do_softirq();
2111 preempt_enable();
2112
2113 return err;
2114}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115EXPORT_SYMBOL(netif_rx_ni);
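/*
 * Illustrative sketch, not part of dev.c: a non-NAPI driver handing a
 * received frame to the stack with netif_rx() from its RX interrupt
 * handler.  The helper name and length handling are hypothetical; a
 * process-context caller would use netif_rx_ni() instead.
 */
static void example_rx_one(struct net_device *dev, const void *data,
			   unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);

	netif_rx(skb);		/* queues onto the per-CPU backlog, see above */
}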
2116
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117static void net_tx_action(struct softirq_action *h)
2118{
2119 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2120
2121 if (sd->completion_queue) {
2122 struct sk_buff *clist;
2123
2124 local_irq_disable();
2125 clist = sd->completion_queue;
2126 sd->completion_queue = NULL;
2127 local_irq_enable();
2128
2129 while (clist) {
2130 struct sk_buff *skb = clist;
2131 clist = clist->next;
2132
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002133 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 __kfree_skb(skb);
2135 }
2136 }
2137
2138 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002139 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140
2141 local_irq_disable();
2142 head = sd->output_queue;
2143 sd->output_queue = NULL;
2144 local_irq_enable();
2145
2146 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002147 struct Qdisc *q = head;
2148 spinlock_t *root_lock;
2149
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 head = head->next_sched;
2151
David S. Miller5fb66222008-08-02 20:02:43 -07002152 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002153 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002154 smp_mb__before_clear_bit();
2155 clear_bit(__QDISC_STATE_SCHED,
2156 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002157 qdisc_run(q);
2158 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002160 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002161 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002162 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002163 } else {
2164 smp_mb__before_clear_bit();
2165 clear_bit(__QDISC_STATE_SCHED,
2166 &q->state);
2167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 }
2169 }
2170 }
2171}
2172
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002173static inline int deliver_skb(struct sk_buff *skb,
2174 struct packet_type *pt_prev,
2175 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176{
2177 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002178 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179}
2180
2181#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002182
2183#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2184/* This hook is defined here for ATM LANE */
2185int (*br_fdb_test_addr_hook)(struct net_device *dev,
2186 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002187EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002188#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189
Stephen Hemminger6229e362007-03-21 13:38:47 -07002190/*
2191 * If bridge module is loaded call bridging hook.
2192 * returns NULL if packet was consumed.
2193 */
2194struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2195 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002196EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002197
Stephen Hemminger6229e362007-03-21 13:38:47 -07002198static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2199 struct packet_type **pt_prev, int *ret,
2200 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201{
2202 struct net_bridge_port *port;
2203
Stephen Hemminger6229e362007-03-21 13:38:47 -07002204 if (skb->pkt_type == PACKET_LOOPBACK ||
2205 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2206 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207
2208 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002209 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002211 }
2212
Stephen Hemminger6229e362007-03-21 13:38:47 -07002213 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214}
2215#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002216#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217#endif
2218
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002219#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2220struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2221EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2222
2223static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2224 struct packet_type **pt_prev,
2225 int *ret,
2226 struct net_device *orig_dev)
2227{
2228 if (skb->dev->macvlan_port == NULL)
2229 return skb;
2230
2231 if (*pt_prev) {
2232 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2233 *pt_prev = NULL;
2234 }
2235 return macvlan_handle_frame_hook(skb);
2236}
2237#else
2238#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2239#endif
2240
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241#ifdef CONFIG_NET_CLS_ACT
2242/* TODO: Maybe we should just force sch_ingress to be compiled in
2243 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
2244 * instructions (a compare and 2 extra stores) right now if we don't
2245 * have it on but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002246 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 * the ingress scheduler, you just can't add policies on ingress.
2248 *
2249 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002250static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002253 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002254 struct netdev_queue *rxq;
2255 int result = TC_ACT_OK;
2256 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002257
Herbert Xuf697c3e2007-10-14 00:38:47 -07002258 if (MAX_RED_LOOP < ttl++) {
2259 printk(KERN_WARNING
2260 "Redir loop detected Dropping packet (%d->%d)\n",
2261 skb->iif, dev->ifindex);
2262 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 }
2264
Herbert Xuf697c3e2007-10-14 00:38:47 -07002265 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2266 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2267
David S. Miller555353c2008-07-08 17:33:13 -07002268 rxq = &dev->rx_queue;
2269
David S. Miller83874002008-07-17 00:53:03 -07002270 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002271 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002272 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002273 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2274 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002275 spin_unlock(qdisc_lock(q));
2276 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002277
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 return result;
2279}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002280
2281static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2282 struct packet_type **pt_prev,
2283 int *ret, struct net_device *orig_dev)
2284{
David S. Miller8d50b532008-07-30 02:37:46 -07002285 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002286 goto out;
2287
2288 if (*pt_prev) {
2289 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2290 *pt_prev = NULL;
2291 } else {
2292 /* Huh? Why does turning on AF_PACKET affect this? */
2293 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2294 }
2295
2296 switch (ing_filter(skb)) {
2297 case TC_ACT_SHOT:
2298 case TC_ACT_STOLEN:
2299 kfree_skb(skb);
2300 return NULL;
2301 }
2302
2303out:
2304 skb->tc_verd = 0;
2305 return skb;
2306}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307#endif
2308
Patrick McHardybc1d0412008-07-14 22:49:30 -07002309/*
2310 * netif_nit_deliver - deliver received packets to network taps
2311 * @skb: buffer
2312 *
2313 * This function is used to deliver incoming packets to network
2314 * taps. It should be used when the normal netif_receive_skb path
2315 * is bypassed, for example because of VLAN acceleration.
2316 */
2317void netif_nit_deliver(struct sk_buff *skb)
2318{
2319 struct packet_type *ptype;
2320
2321 if (list_empty(&ptype_all))
2322 return;
2323
2324 skb_reset_network_header(skb);
2325 skb_reset_transport_header(skb);
2326 skb->mac_len = skb->network_header - skb->mac_header;
2327
2328 rcu_read_lock();
2329 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2330 if (!ptype->dev || ptype->dev == skb->dev)
2331 deliver_skb(skb, ptype, skb->dev);
2332 }
2333 rcu_read_unlock();
2334}
2335
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002336/**
2337 * netif_receive_skb - process receive buffer from network
2338 * @skb: buffer to process
2339 *
2340 * netif_receive_skb() is the main receive data processing function.
2341 * It always succeeds. The buffer may be dropped during processing
2342 * for congestion control or by the protocol layers.
2343 *
2344 * This function may only be called from softirq context and interrupts
2345 * should be enabled.
2346 *
2347 * Return values (usually ignored):
2348 * NET_RX_SUCCESS: no congestion
2349 * NET_RX_DROP: packet was dropped
2350 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351int netif_receive_skb(struct sk_buff *skb)
2352{
2353 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002354 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002355 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002357 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002359 if (!skb->tstamp.tv64)
2360 net_timestamp(skb);
2361
Eric Dumazet05423b22009-10-26 18:40:35 -07002362 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002363 return NET_RX_SUCCESS;
2364
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002366 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 return NET_RX_DROP;
2368
Patrick McHardyc01003c2007-03-29 11:46:52 -07002369 if (!skb->iif)
2370 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002371
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002372 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002373 orig_dev = skb->dev;
2374 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002375 if (skb_bond_should_drop(skb))
2376 null_or_orig = orig_dev; /* deliver only exact match */
2377 else
2378 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002379 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002380
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 __get_cpu_var(netdev_rx_stat).total++;
2382
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002383 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002384 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002385 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386
2387 pt_prev = NULL;
2388
2389 rcu_read_lock();
2390
2391#ifdef CONFIG_NET_CLS_ACT
2392 if (skb->tc_verd & TC_NCLS) {
2393 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2394 goto ncls;
2395 }
2396#endif
2397
2398 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002399 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2400 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002401 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002402 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 pt_prev = ptype;
2404 }
2405 }
2406
2407#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002408 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2409 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411ncls:
2412#endif
2413
Stephen Hemminger6229e362007-03-21 13:38:47 -07002414 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2415 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002417 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2418 if (!skb)
2419 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420
2421 type = skb->protocol;
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08002422 list_for_each_entry_rcu(ptype,
2423 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002425 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2426 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002427 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002428 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 pt_prev = ptype;
2430 }
2431 }
2432
2433 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002434 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 } else {
2436 kfree_skb(skb);
2437 /* Jamal, now you will not be able to escape explaining
2438 * to me how you were going to use this. :-)
2439 */
2440 ret = NET_RX_DROP;
2441 }
2442
2443out:
2444 rcu_read_unlock();
2445 return ret;
2446}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002447EXPORT_SYMBOL(netif_receive_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002449/* Network device is going away, flush any packets still pending */
2450static void flush_backlog(void *arg)
2451{
2452 struct net_device *dev = arg;
2453 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2454 struct sk_buff *skb, *tmp;
2455
2456 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2457 if (skb->dev == dev) {
2458 __skb_unlink(skb, &queue->input_pkt_queue);
2459 kfree_skb(skb);
2460 }
2461}
2462
Herbert Xud565b0a2008-12-15 23:38:52 -08002463static int napi_gro_complete(struct sk_buff *skb)
2464{
2465 struct packet_type *ptype;
2466 __be16 type = skb->protocol;
2467 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2468 int err = -ENOENT;
2469
Herbert Xufc59f9a2009-04-14 15:11:06 -07002470 if (NAPI_GRO_CB(skb)->count == 1) {
2471 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002472 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002473 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002474
2475 rcu_read_lock();
2476 list_for_each_entry_rcu(ptype, head, list) {
2477 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2478 continue;
2479
2480 err = ptype->gro_complete(skb);
2481 break;
2482 }
2483 rcu_read_unlock();
2484
2485 if (err) {
2486 WARN_ON(&ptype->list == head);
2487 kfree_skb(skb);
2488 return NET_RX_SUCCESS;
2489 }
2490
2491out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002492 return netif_receive_skb(skb);
2493}
2494
2495void napi_gro_flush(struct napi_struct *napi)
2496{
2497 struct sk_buff *skb, *next;
2498
2499 for (skb = napi->gro_list; skb; skb = next) {
2500 next = skb->next;
2501 skb->next = NULL;
2502 napi_gro_complete(skb);
2503 }
2504
Herbert Xu4ae55442009-02-08 18:00:36 +00002505 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002506 napi->gro_list = NULL;
2507}
2508EXPORT_SYMBOL(napi_gro_flush);
2509
Ben Hutchings5b252f02009-10-29 07:17:09 +00002510enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002511{
2512 struct sk_buff **pp = NULL;
2513 struct packet_type *ptype;
2514 __be16 type = skb->protocol;
2515 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd2008-12-26 14:57:42 -08002516 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002517 int mac_len;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002518 enum gro_result ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002519
2520 if (!(skb->dev->features & NETIF_F_GRO))
2521 goto normal;
2522
David S. Miller4cf704f2009-06-09 00:18:51 -07002523 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002524 goto normal;
2525
Herbert Xud565b0a2008-12-15 23:38:52 -08002526 rcu_read_lock();
2527 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002528 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2529 continue;
2530
Herbert Xu86911732009-01-29 14:19:50 +00002531 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002532 mac_len = skb->network_header - skb->mac_header;
2533 skb->mac_len = mac_len;
2534 NAPI_GRO_CB(skb)->same_flow = 0;
2535 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002536 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002537
Herbert Xud565b0a2008-12-15 23:38:52 -08002538 pp = ptype->gro_receive(&napi->gro_list, skb);
2539 break;
2540 }
2541 rcu_read_unlock();
2542
2543 if (&ptype->list == head)
2544 goto normal;
2545
Herbert Xu0da2afd2008-12-26 14:57:42 -08002546 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002547 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd2008-12-26 14:57:42 -08002548
Herbert Xud565b0a2008-12-15 23:38:52 -08002549 if (pp) {
2550 struct sk_buff *nskb = *pp;
2551
2552 *pp = nskb->next;
2553 nskb->next = NULL;
2554 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002555 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002556 }
2557
Herbert Xu0da2afd2008-12-26 14:57:42 -08002558 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002559 goto ok;
2560
Herbert Xu4ae55442009-02-08 18:00:36 +00002561 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002562 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002563
Herbert Xu4ae55442009-02-08 18:00:36 +00002564 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002565 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002566 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002567 skb->next = napi->gro_list;
2568 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002569 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002570
Herbert Xuad0f9902009-02-01 01:24:55 -08002571pull:
Herbert Xucb189782009-05-26 18:50:31 +00002572 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2573 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2574
2575 BUG_ON(skb->end - skb->tail < grow);
2576
2577 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2578
2579 skb->tail += grow;
2580 skb->data_len -= grow;
2581
2582 skb_shinfo(skb)->frags[0].page_offset += grow;
2583 skb_shinfo(skb)->frags[0].size -= grow;
2584
2585 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2586 put_page(skb_shinfo(skb)->frags[0].page);
2587 memmove(skb_shinfo(skb)->frags,
2588 skb_shinfo(skb)->frags + 1,
2589 --skb_shinfo(skb)->nr_frags);
2590 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002591 }
2592
Herbert Xud565b0a2008-12-15 23:38:52 -08002593ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002594 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002595
2596normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002597 ret = GRO_NORMAL;
2598 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002599}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002600EXPORT_SYMBOL(dev_gro_receive);
2601
Ben Hutchings5b252f02009-10-29 07:17:09 +00002602static gro_result_t
2603__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002604{
2605 struct sk_buff *p;
2606
Herbert Xud1c76af2009-03-16 10:50:02 -07002607 if (netpoll_rx_on(skb))
2608 return GRO_NORMAL;
2609
Herbert Xu96e93ea2009-01-06 10:49:34 -08002610 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002611 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2612 && !compare_ether_header(skb_mac_header(p),
2613 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002614 NAPI_GRO_CB(p)->flush = 0;
2615 }
2616
2617 return dev_gro_receive(napi, skb);
2618}
Herbert Xu5d38a072009-01-04 16:13:40 -08002619
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002620gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002621{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002622 switch (ret) {
2623 case GRO_NORMAL:
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002624 if (netif_receive_skb(skb))
2625 ret = GRO_DROP;
2626 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002627
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002628 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002629 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002630 kfree_skb(skb);
2631 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002632
2633 case GRO_HELD:
2634 case GRO_MERGED:
2635 break;
Herbert Xu5d38a072009-01-04 16:13:40 -08002636 }
2637
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002638 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002639}
2640EXPORT_SYMBOL(napi_skb_finish);
2641
Herbert Xu78a478d2009-05-26 18:50:21 +00002642void skb_gro_reset_offset(struct sk_buff *skb)
2643{
2644 NAPI_GRO_CB(skb)->data_offset = 0;
2645 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002646 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002647
Herbert Xu78d3fd02009-05-26 18:50:23 +00002648 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002649 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002650 NAPI_GRO_CB(skb)->frag0 =
2651 page_address(skb_shinfo(skb)->frags[0].page) +
2652 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002653 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2654 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002655}
2656EXPORT_SYMBOL(skb_gro_reset_offset);
2657
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002658gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002659{
Herbert Xu86911732009-01-29 14:19:50 +00002660 skb_gro_reset_offset(skb);
2661
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002662 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002663}
2664EXPORT_SYMBOL(napi_gro_receive);
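/*
 * Illustrative sketch, not part of the original file: the usual shape of a
 * NAPI driver's ->poll() handler feeding received frames into GRO.  The
 * foo_priv layout and the foo_rx_frame() helper are hypothetical; only
 * eth_type_trans(), napi_gro_receive() and napi_complete() are real APIs.
 *
 *	struct foo_priv {
 *		struct napi_struct	napi;
 *		struct net_device	*netdev;
 *	};
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv,
 *						     napi);
 *		struct sk_buff *skb;
 *		int work_done = 0;
 *
 *		while (work_done < budget &&
 *		       (skb = foo_rx_frame(priv)) != NULL) {
 *			skb->protocol = eth_type_trans(skb, priv->netdev);
 *			napi_gro_receive(napi, skb);
 *			work_done++;
 *		}
 *
 *		if (work_done < budget)
 *			napi_complete(napi);
 *
 *		return work_done;
 *	}
 */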
2665
Herbert Xu96e93ea2009-01-06 10:49:34 -08002666void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2667{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002668 __skb_pull(skb, skb_headlen(skb));
2669 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2670
2671 napi->skb = skb;
2672}
2673EXPORT_SYMBOL(napi_reuse_skb);
2674
Herbert Xu76620aa2009-04-16 02:02:07 -07002675struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002676{
Herbert Xu5d38a072009-01-04 16:13:40 -08002677 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002678
2679 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00002680 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2681 if (skb)
2682 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002683 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08002684 return skb;
2685}
Herbert Xu76620aa2009-04-16 02:02:07 -07002686EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002687
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002688gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
2689 gro_result_t ret)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002690{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002691 switch (ret) {
2692 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002693 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002694 skb->protocol = eth_type_trans(skb, napi->dev);
2695
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002696 if (ret == GRO_HELD)
2697 skb_gro_pull(skb, -ETH_HLEN);
2698 else if (netif_receive_skb(skb))
2699 ret = GRO_DROP;
Herbert Xu86911732009-01-29 14:19:50 +00002700 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002701
2702 case GRO_DROP:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002703 case GRO_MERGED_FREE:
2704 napi_reuse_skb(napi, skb);
2705 break;
Ben Hutchings5b252f02009-10-29 07:17:09 +00002706
2707 case GRO_MERGED:
2708 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002709 }
2710
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002711 return ret;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002712}
2713EXPORT_SYMBOL(napi_frags_finish);
2714
Herbert Xu76620aa2009-04-16 02:02:07 -07002715struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002716{
Herbert Xu76620aa2009-04-16 02:02:07 -07002717 struct sk_buff *skb = napi->skb;
2718 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002719 unsigned int hlen;
2720 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002721
2722 napi->skb = NULL;
2723
2724 skb_reset_mac_header(skb);
2725 skb_gro_reset_offset(skb);
2726
Herbert Xua5b1cf22009-05-26 18:50:28 +00002727 off = skb_gro_offset(skb);
2728 hlen = off + sizeof(*eth);
2729 eth = skb_gro_header_fast(skb, off);
2730 if (skb_gro_header_hard(skb, hlen)) {
2731 eth = skb_gro_header_slow(skb, hlen, off);
2732 if (unlikely(!eth)) {
2733 napi_reuse_skb(napi, skb);
2734 skb = NULL;
2735 goto out;
2736 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002737 }
2738
2739 skb_gro_pull(skb, sizeof(*eth));
2740
2741 /*
2742 * This works because the only protocols we care about don't require
2743 * special handling. We'll fix it up properly at the end.
2744 */
2745 skb->protocol = eth->h_proto;
2746
2747out:
2748 return skb;
2749}
2750EXPORT_SYMBOL(napi_frags_skb);
2751
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002752gro_result_t napi_gro_frags(struct napi_struct *napi)
Herbert Xu76620aa2009-04-16 02:02:07 -07002753{
2754 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002755
2756 if (!skb)
Ben Hutchingsc7c4b3b2009-10-29 21:36:53 -07002757 return GRO_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002758
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002759 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002760}
2761EXPORT_SYMBOL(napi_gro_frags);
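/*
 * Illustrative sketch, not part of the original file: a driver that DMAs
 * frames straight into pages can hand them to GRO without building an skb
 * itself.  "page", "offset" and "len" stand for wherever the hypothetical
 * hardware placed the frame; on allocation failure the frame is simply
 * dropped.
 *
 *	skb = napi_get_frags(napi);
 *	if (!skb)
 *		return;
 *
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += len;
 *
 *	napi_gro_frags(napi);
 */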
2762
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002763static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764{
2765 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2767 unsigned long start_time = jiffies;
2768
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002769 napi->weight = weight_p;
2770 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772
2773 local_irq_disable();
2774 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002775 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002776 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002777 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002778 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002779 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 local_irq_enable();
2781
Herbert Xu8f1ead22009-03-26 00:59:10 -07002782 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002783 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002785 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786}
2787
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002788/**
2789 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002790 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002791 *
2792 * The entry's receive function will be scheduled to run
2793 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002794void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002795{
2796 unsigned long flags;
2797
2798 local_irq_save(flags);
2799 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2800 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2801 local_irq_restore(flags);
2802}
2803EXPORT_SYMBOL(__napi_schedule);
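/*
 * Illustrative sketch, not part of the original file: the canonical way a
 * driver's interrupt handler defers receive work to NAPI.  The
 * foo_disable_rx_irq() helper is hypothetical; napi_schedule_prep() and
 * __napi_schedule() are the real entry points.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_disable_rx_irq(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */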
2804
Herbert Xud565b0a2008-12-15 23:38:52 -08002805void __napi_complete(struct napi_struct *n)
2806{
2807 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2808 BUG_ON(n->gro_list);
2809
2810 list_del(&n->poll_list);
2811 smp_mb__before_clear_bit();
2812 clear_bit(NAPI_STATE_SCHED, &n->state);
2813}
2814EXPORT_SYMBOL(__napi_complete);
2815
2816void napi_complete(struct napi_struct *n)
2817{
2818 unsigned long flags;
2819
2820 /*
2821 * don't let napi dequeue from the cpu poll list
2822	 * just in case it's running on a different cpu
2823 */
2824 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2825 return;
2826
2827 napi_gro_flush(n);
2828 local_irq_save(flags);
2829 __napi_complete(n);
2830 local_irq_restore(flags);
2831}
2832EXPORT_SYMBOL(napi_complete);
2833
2834void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2835 int (*poll)(struct napi_struct *, int), int weight)
2836{
2837 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002838 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002839 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002840 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002841 napi->poll = poll;
2842 napi->weight = weight;
2843 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002844 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002845#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002846 spin_lock_init(&napi->poll_lock);
2847 napi->poll_owner = -1;
2848#endif
2849 set_bit(NAPI_STATE_SCHED, &napi->state);
2850}
2851EXPORT_SYMBOL(netif_napi_add);
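/*
 * Illustrative sketch, not part of the original file: the usual NAPI life
 * cycle in a driver.  foo_probe()/foo_open()/foo_stop()/foo_remove() are
 * hypothetical call sites; a weight of 64 is what most Ethernet drivers of
 * this era pass.
 *
 *	in foo_probe():		netif_napi_add(netdev, &priv->napi, foo_poll, 64);
 *	in foo_open():		napi_enable(&priv->napi);
 *	in foo_stop():		napi_disable(&priv->napi);
 *	in foo_remove():	netif_napi_del(&priv->napi);
 */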
2852
2853void netif_napi_del(struct napi_struct *napi)
2854{
2855 struct sk_buff *skb, *next;
2856
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002857 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002858 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002859
2860 for (skb = napi->gro_list; skb; skb = next) {
2861 next = skb->next;
2862 skb->next = NULL;
2863 kfree_skb(skb);
2864 }
2865
2866 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002867 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002868}
2869EXPORT_SYMBOL(netif_napi_del);
2870
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002871
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872static void net_rx_action(struct softirq_action *h)
2873{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002874 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002875 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002876 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002877 void *have;
2878
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879 local_irq_disable();
2880
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002881 while (!list_empty(list)) {
2882 struct napi_struct *n;
2883 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002885		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002886		 * Allow this to run for 2 jiffies, which allows
2887		 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002888 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002889 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 goto softnet_break;
2891
2892 local_irq_enable();
2893
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002894 /* Even though interrupts have been re-enabled, this
2895 * access is safe because interrupts can only add new
2896 * entries to the tail of this list, and only ->poll()
2897 * calls can remove this head entry from the list.
2898 */
2899 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002901 have = netpoll_poll_lock(n);
2902
2903 weight = n->weight;
2904
David S. Miller0a7606c2007-10-29 21:28:47 -07002905 /* This NAPI_STATE_SCHED test is for avoiding a race
2906 * with netpoll's poll_napi(). Only the entity which
2907 * obtains the lock and sees NAPI_STATE_SCHED set will
2908 * actually make the ->poll() call. Therefore we avoid
2909		 * accidentally calling ->poll() when NAPI is not scheduled.
2910 */
2911 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002912 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002913 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002914 trace_napi_poll(n);
2915 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002916
2917 WARN_ON_ONCE(work > weight);
2918
2919 budget -= work;
2920
2921 local_irq_disable();
2922
2923 /* Drivers must not modify the NAPI state if they
2924 * consume the entire weight. In such cases this code
2925 * still "owns" the NAPI instance and therefore can
2926 * move the instance around on the list at-will.
2927 */
David S. Millerfed17f32008-01-07 21:00:40 -08002928 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07002929 if (unlikely(napi_disable_pending(n))) {
2930 local_irq_enable();
2931 napi_complete(n);
2932 local_irq_disable();
2933 } else
David S. Millerfed17f32008-01-07 21:00:40 -08002934 list_move_tail(&n->poll_list, list);
2935 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002936
2937 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 }
2939out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002940 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002941
Chris Leechdb217332006-06-17 21:24:58 -07002942#ifdef CONFIG_NET_DMA
2943 /*
2944 * There may not be any more sk_buffs coming right now, so push
2945 * any pending DMA copies to hardware
2946 */
Dan Williams2ba05622009-01-06 11:38:14 -07002947 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002948#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002949
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 return;
2951
2952softnet_break:
2953 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2954 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2955 goto out;
2956}
2957
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002958static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959
2960/**
2961 * register_gifconf - register a SIOCGIF handler
2962 * @family: Address family
2963 * @gifconf: Function handler
2964 *
2965 * Register protocol dependent address dumping routines. The handler
2966 * that is passed must not be freed or reused until it has been replaced
2967 * by another handler.
2968 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002969int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970{
2971 if (family >= NPROTO)
2972 return -EINVAL;
2973 gifconf_list[family] = gifconf;
2974 return 0;
2975}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002976EXPORT_SYMBOL(register_gifconf);
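/*
 * Usage note, not part of the original file: IPv4 registers its
 * SIOCGIFCONF handler from net/ipv4/devinet.c roughly as sketched below.
 * The handler writes one struct ifreq per address into "buf" (or, when
 * "buf" is NULL, only reports how much room it would need) and returns
 * the number of bytes used.
 *
 *	static int inet_gifconf(struct net_device *dev,
 *				char __user *buf, int len)
 *	{
 *		...
 *	}
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */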
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977
2978
2979/*
2980 * Map an interface index to its name (SIOCGIFNAME)
2981 */
2982
2983/*
2984 * We need this ioctl for efficient implementation of the
2985 * if_indextoname() function required by the IPv6 API. Without
2986 * it, we would have to search all the interfaces to find a
2987 * match. --pb
2988 */
2989
Eric W. Biederman881d9662007-09-17 11:56:21 -07002990static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991{
2992 struct net_device *dev;
2993 struct ifreq ifr;
2994
2995 /*
2996 * Fetch the caller's info block.
2997 */
2998
2999 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3000 return -EFAULT;
3001
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003002 rcu_read_lock();
3003 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 if (!dev) {
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003005 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 return -ENODEV;
3007 }
3008
3009 strcpy(ifr.ifr_name, dev->name);
Eric Dumazetfb699dfd2009-10-19 19:18:49 +00003010 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
3012 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3013 return -EFAULT;
3014 return 0;
3015}
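/*
 * Illustrative user space sketch, not part of the original file: this
 * ioctl is what glibc's if_indextoname() boils down to.  Needs
 * <sys/ioctl.h>, <net/if.h> and a throw-away datagram socket.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = ifindex;
 *	if (fd >= 0 && ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *		printf("ifindex %d is %s\n", ifindex, ifr.ifr_name);
 */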
3016
3017/*
3018 * Perform a SIOCGIFCONF call. This structure will change
3019 * size eventually, and there is nothing I can do about it.
3020 * Thus we will need a 'compatibility mode'.
3021 */
3022
Eric W. Biederman881d9662007-09-17 11:56:21 -07003023static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024{
3025 struct ifconf ifc;
3026 struct net_device *dev;
3027 char __user *pos;
3028 int len;
3029 int total;
3030 int i;
3031
3032 /*
3033 * Fetch the caller's info block.
3034 */
3035
3036 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3037 return -EFAULT;
3038
3039 pos = ifc.ifc_buf;
3040 len = ifc.ifc_len;
3041
3042 /*
3043 * Loop over the interfaces, and write an info block for each.
3044 */
3045
3046 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003047 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048 for (i = 0; i < NPROTO; i++) {
3049 if (gifconf_list[i]) {
3050 int done;
3051 if (!pos)
3052 done = gifconf_list[i](dev, NULL, 0);
3053 else
3054 done = gifconf_list[i](dev, pos + total,
3055 len - total);
3056 if (done < 0)
3057 return -EFAULT;
3058 total += done;
3059 }
3060 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003061 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062
3063 /*
3064 * All done. Write the updated control block back to the caller.
3065 */
3066 ifc.ifc_len = total;
3067
3068 /*
3069 * Both BSD and Solaris return 0 here, so we do too.
3070 */
3071 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3072}
3073
3074#ifdef CONFIG_PROC_FS
3075/*
3076 * This is invoked by the /proc filesystem handler to display a device
3077 * in detail.
3078 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08003080 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081{
Denis V. Luneve372c412007-11-19 22:31:54 -08003082 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003083 loff_t off;
3084 struct net_device *dev;
3085
Linus Torvalds1da177e2005-04-16 15:20:36 -07003086 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003087 if (!*pos)
3088 return SEQ_START_TOKEN;
3089
3090 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003091 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003092 if (off++ == *pos)
3093 return dev;
3094
3095 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096}
3097
3098void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3099{
Denis V. Luneve372c412007-11-19 22:31:54 -08003100 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07003102 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07003103 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003104}
3105
3106void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08003107 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108{
3109 read_unlock(&dev_base_lock);
3110}
3111
3112static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3113{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003114 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115
Rusty Russell5a1b5892007-04-28 21:04:03 -07003116 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3117 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3118 dev->name, stats->rx_bytes, stats->rx_packets,
3119 stats->rx_errors,
3120 stats->rx_dropped + stats->rx_missed_errors,
3121 stats->rx_fifo_errors,
3122 stats->rx_length_errors + stats->rx_over_errors +
3123 stats->rx_crc_errors + stats->rx_frame_errors,
3124 stats->rx_compressed, stats->multicast,
3125 stats->tx_bytes, stats->tx_packets,
3126 stats->tx_errors, stats->tx_dropped,
3127 stats->tx_fifo_errors, stats->collisions,
3128 stats->tx_carrier_errors +
3129 stats->tx_aborted_errors +
3130 stats->tx_window_errors +
3131 stats->tx_heartbeat_errors,
3132 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133}
3134
3135/*
3136 * Called from the PROCfs module. This now uses the new arbitrary sized
3137 * /proc/net interface to create /proc/net/dev
3138 */
3139static int dev_seq_show(struct seq_file *seq, void *v)
3140{
3141 if (v == SEQ_START_TOKEN)
3142 seq_puts(seq, "Inter-| Receive "
3143 " | Transmit\n"
3144 " face |bytes packets errs drop fifo frame "
3145 "compressed multicast|bytes packets errs "
3146 "drop fifo colls carrier compressed\n");
3147 else
3148 dev_seq_printf_stats(seq, v);
3149 return 0;
3150}
3151
3152static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3153{
3154 struct netif_rx_stats *rc = NULL;
3155
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003156 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003157 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158 rc = &per_cpu(netdev_rx_stat, *pos);
3159 break;
3160 } else
3161 ++*pos;
3162 return rc;
3163}
3164
3165static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3166{
3167 return softnet_get_online(pos);
3168}
3169
3170static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3171{
3172 ++*pos;
3173 return softnet_get_online(pos);
3174}
3175
3176static void softnet_seq_stop(struct seq_file *seq, void *v)
3177{
3178}
3179
3180static int softnet_seq_show(struct seq_file *seq, void *v)
3181{
3182 struct netif_rx_stats *s = v;
3183
3184 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003185 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003186 0, 0, 0, 0, /* was fastroute */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003187 s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188 return 0;
3189}
3190
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003191static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192 .start = dev_seq_start,
3193 .next = dev_seq_next,
3194 .stop = dev_seq_stop,
3195 .show = dev_seq_show,
3196};
3197
3198static int dev_seq_open(struct inode *inode, struct file *file)
3199{
Denis V. Luneve372c412007-11-19 22:31:54 -08003200 return seq_open_net(inode, file, &dev_seq_ops,
3201 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202}
3203
Arjan van de Ven9a321442007-02-12 00:55:35 -08003204static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205 .owner = THIS_MODULE,
3206 .open = dev_seq_open,
3207 .read = seq_read,
3208 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003209 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210};
3211
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003212static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213 .start = softnet_seq_start,
3214 .next = softnet_seq_next,
3215 .stop = softnet_seq_stop,
3216 .show = softnet_seq_show,
3217};
3218
3219static int softnet_seq_open(struct inode *inode, struct file *file)
3220{
3221 return seq_open(file, &softnet_seq_ops);
3222}
3223
Arjan van de Ven9a321442007-02-12 00:55:35 -08003224static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 .owner = THIS_MODULE,
3226 .open = softnet_seq_open,
3227 .read = seq_read,
3228 .llseek = seq_lseek,
3229 .release = seq_release,
3230};
3231
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003232static void *ptype_get_idx(loff_t pos)
3233{
3234 struct packet_type *pt = NULL;
3235 loff_t i = 0;
3236 int t;
3237
3238 list_for_each_entry_rcu(pt, &ptype_all, list) {
3239 if (i == pos)
3240 return pt;
3241 ++i;
3242 }
3243
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08003244 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003245 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3246 if (i == pos)
3247 return pt;
3248 ++i;
3249 }
3250 }
3251 return NULL;
3252}
3253
3254static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003255 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003256{
3257 rcu_read_lock();
3258 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3259}
3260
3261static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3262{
3263 struct packet_type *pt;
3264 struct list_head *nxt;
3265 int hash;
3266
3267 ++*pos;
3268 if (v == SEQ_START_TOKEN)
3269 return ptype_get_idx(0);
3270
3271 pt = v;
3272 nxt = pt->list.next;
3273 if (pt->type == htons(ETH_P_ALL)) {
3274 if (nxt != &ptype_all)
3275 goto found;
3276 hash = 0;
3277 nxt = ptype_base[0].next;
3278 } else
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08003279 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003280
3281 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08003282 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003283 return NULL;
3284 nxt = ptype_base[hash].next;
3285 }
3286found:
3287 return list_entry(nxt, struct packet_type, list);
3288}
3289
3290static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003291 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003292{
3293 rcu_read_unlock();
3294}
3295
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003296static int ptype_seq_show(struct seq_file *seq, void *v)
3297{
3298 struct packet_type *pt = v;
3299
3300 if (v == SEQ_START_TOKEN)
3301 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003302 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003303 if (pt->type == htons(ETH_P_ALL))
3304 seq_puts(seq, "ALL ");
3305 else
3306 seq_printf(seq, "%04x", ntohs(pt->type));
3307
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003308 seq_printf(seq, " %-8s %pF\n",
3309 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003310 }
3311
3312 return 0;
3313}
3314
3315static const struct seq_operations ptype_seq_ops = {
3316 .start = ptype_seq_start,
3317 .next = ptype_seq_next,
3318 .stop = ptype_seq_stop,
3319 .show = ptype_seq_show,
3320};
3321
3322static int ptype_seq_open(struct inode *inode, struct file *file)
3323{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003324 return seq_open_net(inode, file, &ptype_seq_ops,
3325 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003326}
3327
3328static const struct file_operations ptype_seq_fops = {
3329 .owner = THIS_MODULE,
3330 .open = ptype_seq_open,
3331 .read = seq_read,
3332 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003333 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003334};
3335
3336
Pavel Emelyanov46650792007-10-08 20:38:39 -07003337static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003338{
3339 int rc = -ENOMEM;
3340
Eric W. Biederman881d9662007-09-17 11:56:21 -07003341 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003342 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003343 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003344 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003345 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003346 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003347
Eric W. Biederman881d9662007-09-17 11:56:21 -07003348 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003349 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350 rc = 0;
3351out:
3352 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003353out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003354 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003356 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003358 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003359 goto out;
3360}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003361
Pavel Emelyanov46650792007-10-08 20:38:39 -07003362static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003363{
3364 wext_proc_exit(net);
3365
3366 proc_net_remove(net, "ptype");
3367 proc_net_remove(net, "softnet_stat");
3368 proc_net_remove(net, "dev");
3369}
3370
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003371static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003372 .init = dev_proc_net_init,
3373 .exit = dev_proc_net_exit,
3374};
3375
3376static int __init dev_proc_init(void)
3377{
3378 return register_pernet_subsys(&dev_proc_ops);
3379}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380#else
3381#define dev_proc_init() 0
3382#endif /* CONFIG_PROC_FS */
3383
3384
3385/**
3386 * netdev_set_master - set up master/slave pair
3387 * @slave: slave device
3388 * @master: new master device
3389 *
3390 * Changes the master device of the slave. Pass %NULL to break the
3391 * bonding. The caller must hold the RTNL semaphore. On a failure
3392 * a negative errno code is returned. On success the reference counts
3393 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3394 * function returns zero.
3395 */
3396int netdev_set_master(struct net_device *slave, struct net_device *master)
3397{
3398 struct net_device *old = slave->master;
3399
3400 ASSERT_RTNL();
3401
3402 if (master) {
3403 if (old)
3404 return -EBUSY;
3405 dev_hold(master);
3406 }
3407
3408 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003409
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410 synchronize_net();
3411
3412 if (old)
3413 dev_put(old);
3414
3415 if (master)
3416 slave->flags |= IFF_SLAVE;
3417 else
3418 slave->flags &= ~IFF_SLAVE;
3419
3420 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3421 return 0;
3422}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003423EXPORT_SYMBOL(netdev_set_master);
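/*
 * Illustrative sketch, not part of the original file: a bonding-style
 * driver enslaves a device by passing the master, and breaks the bond by
 * passing NULL, in both cases under the RTNL lock as documented above.
 *
 *	err = netdev_set_master(slave_dev, bond_dev);
 *	if (err)
 *		return err;
 *	...
 *	netdev_set_master(slave_dev, NULL);
 */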
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003425static void dev_change_rx_flags(struct net_device *dev, int flags)
3426{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003427 const struct net_device_ops *ops = dev->netdev_ops;
3428
3429 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3430 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003431}
3432
Wang Chendad9b332008-06-18 01:48:28 -07003433static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003434{
3435 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003436 uid_t uid;
3437 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003438
Patrick McHardy24023452007-07-14 18:51:31 -07003439 ASSERT_RTNL();
3440
Wang Chendad9b332008-06-18 01:48:28 -07003441 dev->flags |= IFF_PROMISC;
3442 dev->promiscuity += inc;
3443 if (dev->promiscuity == 0) {
3444 /*
3445 * Avoid overflow.
3446 * If inc causes overflow, untouch promisc and return error.
3447 */
3448 if (inc < 0)
3449 dev->flags &= ~IFF_PROMISC;
3450 else {
3451 dev->promiscuity -= inc;
3452 printk(KERN_WARNING "%s: promiscuity touches roof, "
3453 "set promiscuity failed, promiscuity feature "
3454 "of device might be broken.\n", dev->name);
3455 return -EOVERFLOW;
3456 }
3457 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003458 if (dev->flags != old_flags) {
3459 printk(KERN_INFO "device %s %s promiscuous mode\n",
3460 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3461 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003462 if (audit_enabled) {
3463 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003464 audit_log(current->audit_context, GFP_ATOMIC,
3465 AUDIT_ANOM_PROMISCUOUS,
3466 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3467 dev->name, (dev->flags & IFF_PROMISC),
3468 (old_flags & IFF_PROMISC),
3469 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003470 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003471 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003472 }
Patrick McHardy24023452007-07-14 18:51:31 -07003473
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003474 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003475 }
Wang Chendad9b332008-06-18 01:48:28 -07003476 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003477}
3478
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479/**
3480 * dev_set_promiscuity - update promiscuity count on a device
3481 * @dev: device
3482 * @inc: modifier
3483 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003484 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485 * remains above zero the interface remains promiscuous. Once it hits zero
3486 * the device reverts back to normal filtering operation. A negative inc
3487 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003488 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489 */
Wang Chendad9b332008-06-18 01:48:28 -07003490int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491{
3492 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003493 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003494
Wang Chendad9b332008-06-18 01:48:28 -07003495 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003496 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003497 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003498 if (dev->flags != old_flags)
3499 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003500 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003502EXPORT_SYMBOL(dev_set_promiscuity);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503
3504/**
3505 * dev_set_allmulti - update allmulti count on a device
3506 * @dev: device
3507 * @inc: modifier
3508 *
3509 * Add or remove reception of all multicast frames to a device. While the
3510 * count in the device remains above zero the interface remains listening
3511 * to all multicast frames. Once it hits zero the device reverts back to normal
3512 * filtering operation. A negative @inc value is used to drop the counter
3513 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003514 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515 */
3516
Wang Chendad9b332008-06-18 01:48:28 -07003517int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003518{
3519 unsigned short old_flags = dev->flags;
3520
Patrick McHardy24023452007-07-14 18:51:31 -07003521 ASSERT_RTNL();
3522
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003524 dev->allmulti += inc;
3525 if (dev->allmulti == 0) {
3526 /*
3527 * Avoid overflow.
3528 * If inc causes overflow, untouch allmulti and return error.
3529 */
3530 if (inc < 0)
3531 dev->flags &= ~IFF_ALLMULTI;
3532 else {
3533 dev->allmulti -= inc;
3534 printk(KERN_WARNING "%s: allmulti touches roof, "
3535 "set allmulti failed, allmulti feature of "
3536 "device might be broken.\n", dev->name);
3537 return -EOVERFLOW;
3538 }
3539 }
Patrick McHardy24023452007-07-14 18:51:31 -07003540 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003541 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003542 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003543 }
Wang Chendad9b332008-06-18 01:48:28 -07003544 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003545}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003546EXPORT_SYMBOL(dev_set_allmulti);
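/*
 * Illustrative sketch, not part of the original file: both counters are
 * reference counts, so a user such as a bridge port or a packet tap takes
 * a reference while it needs the mode and drops it with a negative
 * increment, all under the RTNL lock.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	if (!err)
 *		err = dev_set_allmulti(dev, 1);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_allmulti(dev, -1);
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */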
Patrick McHardy4417da62007-06-27 01:28:10 -07003547
3548/*
3549 * Upload unicast and multicast address lists to device and
3550 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003551 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003552 * are present.
3553 */
3554void __dev_set_rx_mode(struct net_device *dev)
3555{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003556 const struct net_device_ops *ops = dev->netdev_ops;
3557
Patrick McHardy4417da62007-06-27 01:28:10 -07003558 /* dev_open will call this function so the list will stay sane. */
3559 if (!(dev->flags&IFF_UP))
3560 return;
3561
3562 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003563 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003564
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003565 if (ops->ndo_set_rx_mode)
3566 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003567 else {
3568		/* Unicast address changes may only happen under the rtnl,
3569 * therefore calling __dev_set_promiscuity here is safe.
3570 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003571 if (dev->uc.count > 0 && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003572 __dev_set_promiscuity(dev, 1);
3573 dev->uc_promisc = 1;
Jiri Pirko31278e72009-06-17 01:12:19 +00003574 } else if (dev->uc.count == 0 && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003575 __dev_set_promiscuity(dev, -1);
3576 dev->uc_promisc = 0;
3577 }
3578
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003579 if (ops->ndo_set_multicast_list)
3580 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003581 }
3582}
3583
3584void dev_set_rx_mode(struct net_device *dev)
3585{
David S. Millerb9e40852008-07-15 00:15:08 -07003586 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003587 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003588 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589}
3590
Jiri Pirkof001fde2009-05-05 02:48:28 +00003591/* hw addresses list handling functions */
3592
Jiri Pirko31278e72009-06-17 01:12:19 +00003593static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3594 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003595{
3596 struct netdev_hw_addr *ha;
3597 int alloc_size;
3598
3599 if (addr_len > MAX_ADDR_LEN)
3600 return -EINVAL;
3601
Jiri Pirko31278e72009-06-17 01:12:19 +00003602 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad22009-05-22 23:22:17 +00003603 if (!memcmp(ha->addr, addr, addr_len) &&
3604 ha->type == addr_type) {
3605 ha->refcount++;
3606 return 0;
3607 }
3608 }
3609
3610
Jiri Pirkof001fde2009-05-05 02:48:28 +00003611 alloc_size = sizeof(*ha);
3612 if (alloc_size < L1_CACHE_BYTES)
3613 alloc_size = L1_CACHE_BYTES;
3614 ha = kmalloc(alloc_size, GFP_ATOMIC);
3615 if (!ha)
3616 return -ENOMEM;
3617 memcpy(ha->addr, addr, addr_len);
3618 ha->type = addr_type;
Jiri Pirkoccffad22009-05-22 23:22:17 +00003619 ha->refcount = 1;
3620 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003621 list_add_tail_rcu(&ha->list, &list->list);
3622 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003623 return 0;
3624}
3625
3626static void ha_rcu_free(struct rcu_head *head)
3627{
3628 struct netdev_hw_addr *ha;
3629
3630 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3631 kfree(ha);
3632}
3633
Jiri Pirko31278e72009-06-17 01:12:19 +00003634static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3635 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003636{
3637 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003638
Jiri Pirko31278e72009-06-17 01:12:19 +00003639 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad22009-05-22 23:22:17 +00003640 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003641 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad22009-05-22 23:22:17 +00003642 if (--ha->refcount)
3643 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003644 list_del_rcu(&ha->list);
3645 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003646 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003647 return 0;
3648 }
3649 }
3650 return -ENOENT;
3651}
3652
Jiri Pirko31278e72009-06-17 01:12:19 +00003653static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3654 struct netdev_hw_addr_list *from_list,
3655 int addr_len,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003656 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003657{
3658 int err;
3659 struct netdev_hw_addr *ha, *ha2;
3660 unsigned char type;
3661
Jiri Pirko31278e72009-06-17 01:12:19 +00003662 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003663 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003664 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003665 if (err)
3666 goto unroll;
3667 }
3668 return 0;
3669
3670unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00003671 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003672 if (ha2 == ha)
3673 break;
3674 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003675 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003676 }
3677 return err;
3678}
3679
Jiri Pirko31278e72009-06-17 01:12:19 +00003680static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3681 struct netdev_hw_addr_list *from_list,
3682 int addr_len,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003683 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003684{
3685 struct netdev_hw_addr *ha;
3686 unsigned char type;
3687
Jiri Pirko31278e72009-06-17 01:12:19 +00003688 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003689 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003690		__hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003691 }
3692}
3693
Jiri Pirko31278e72009-06-17 01:12:19 +00003694static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3695 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003696 int addr_len)
3697{
3698 int err = 0;
3699 struct netdev_hw_addr *ha, *tmp;
3700
Jiri Pirko31278e72009-06-17 01:12:19 +00003701 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad22009-05-22 23:22:17 +00003702 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003703 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003704 addr_len, ha->type);
3705 if (err)
3706 break;
3707 ha->synced = true;
3708 ha->refcount++;
3709 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003710 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3711 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad22009-05-22 23:22:17 +00003712 }
3713 }
3714 return err;
3715}
3716
Jiri Pirko31278e72009-06-17 01:12:19 +00003717static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3718 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003719 int addr_len)
3720{
3721 struct netdev_hw_addr *ha, *tmp;
3722
Jiri Pirko31278e72009-06-17 01:12:19 +00003723 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad22009-05-22 23:22:17 +00003724 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003725 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003726 addr_len, ha->type);
3727 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003728 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003729 addr_len, ha->type);
3730 }
3731 }
3732}
3733
Jiri Pirko31278e72009-06-17 01:12:19 +00003734static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003735{
3736 struct netdev_hw_addr *ha, *tmp;
3737
Jiri Pirko31278e72009-06-17 01:12:19 +00003738 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003739 list_del_rcu(&ha->list);
3740 call_rcu(&ha->rcu_head, ha_rcu_free);
3741 }
Jiri Pirko31278e72009-06-17 01:12:19 +00003742 list->count = 0;
3743}
3744
3745static void __hw_addr_init(struct netdev_hw_addr_list *list)
3746{
3747 INIT_LIST_HEAD(&list->list);
3748 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003749}
3750
3751/* Device addresses handling functions */
3752
3753static void dev_addr_flush(struct net_device *dev)
3754{
3755 /* rtnl_mutex must be held here */
3756
Jiri Pirko31278e72009-06-17 01:12:19 +00003757 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003758 dev->dev_addr = NULL;
3759}
3760
3761static int dev_addr_init(struct net_device *dev)
3762{
3763 unsigned char addr[MAX_ADDR_LEN];
3764 struct netdev_hw_addr *ha;
3765 int err;
3766
3767 /* rtnl_mutex must be held here */
3768
Jiri Pirko31278e72009-06-17 01:12:19 +00003769 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00003770 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00003771 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003772 NETDEV_HW_ADDR_T_LAN);
3773 if (!err) {
3774 /*
3775 * Get the first (previously created) address from the list
3776 * and set dev_addr pointer to this location.
3777 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003778 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003779 struct netdev_hw_addr, list);
3780 dev->dev_addr = ha->addr;
3781 }
3782 return err;
3783}
3784
3785/**
3786 * dev_addr_add - Add a device address
3787 * @dev: device
3788 * @addr: address to add
3789 * @addr_type: address type
3790 *
3791 * Add a device address to the device or increase the reference count if
3792 * it already exists.
3793 *
3794 * The caller must hold the rtnl_mutex.
3795 */
3796int dev_addr_add(struct net_device *dev, unsigned char *addr,
3797 unsigned char addr_type)
3798{
3799 int err;
3800
3801 ASSERT_RTNL();
3802
Jiri Pirko31278e72009-06-17 01:12:19 +00003803 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003804 if (!err)
3805 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3806 return err;
3807}
3808EXPORT_SYMBOL(dev_addr_add);
3809
3810/**
3811 * dev_addr_del - Release a device address.
3812 * @dev: device
3813 * @addr: address to delete
3814 * @addr_type: address type
3815 *
3816 * Release reference to a device address and remove it from the device
3817 * if the reference count drops to zero.
3818 *
3819 * The caller must hold the rtnl_mutex.
3820 */
3821int dev_addr_del(struct net_device *dev, unsigned char *addr,
3822 unsigned char addr_type)
3823{
3824 int err;
Jiri Pirkoccffad22009-05-22 23:22:17 +00003825 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003826
3827 ASSERT_RTNL();
3828
Jiri Pirkoccffad22009-05-22 23:22:17 +00003829 /*
3830 * We can not remove the first address from the list because
3831 * dev->dev_addr points to that.
3832 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003833 ha = list_first_entry(&dev->dev_addrs.list,
3834 struct netdev_hw_addr, list);
Jiri Pirkoccffad22009-05-22 23:22:17 +00003835 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3836 return -ENOENT;
3837
Jiri Pirko31278e72009-06-17 01:12:19 +00003838 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003839 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003840 if (!err)
3841 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3842 return err;
3843}
3844EXPORT_SYMBOL(dev_addr_del);
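/*
 * Illustrative sketch, not part of the original file: a driver that owns a
 * secondary hardware address (for instance a SAN MAC) publishes and later
 * withdraws it like this, holding the RTNL lock as required above.  The
 * "san_mac" buffer is hypothetical.
 *
 *	rtnl_lock();
 *	err = dev_addr_add(netdev, san_mac, NETDEV_HW_ADDR_T_SAN);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_addr_del(netdev, san_mac, NETDEV_HW_ADDR_T_SAN);
 *	rtnl_unlock();
 */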
3845
3846/**
3847 * dev_addr_add_multiple - Add device addresses from another device
3848 * @to_dev: device to which addresses will be added
3849 * @from_dev: device from which addresses will be added
3850 * @addr_type: address type - 0 means type will be used from from_dev
3851 *
3852 * Add the device addresses of one device to another.
3853 *
3854 * The caller must hold the rtnl_mutex.
3855 */
3856int dev_addr_add_multiple(struct net_device *to_dev,
3857 struct net_device *from_dev,
3858 unsigned char addr_type)
3859{
3860 int err;
3861
3862 ASSERT_RTNL();
3863
3864 if (from_dev->addr_len != to_dev->addr_len)
3865 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003866 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003867 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003868 if (!err)
3869 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3870 return err;
3871}
3872EXPORT_SYMBOL(dev_addr_add_multiple);
3873
3874/**
3875 * dev_addr_del_multiple - Delete device addresses by another device
3876 * @to_dev: device where the addresses will be deleted
3877 * @from_dev: device supplying the addresses to be deleted
3878 * @addr_type: address type - 0 means type will be used from from_dev
3879 *
3880 * Deletes the addresses in @to_dev that are listed in @from_dev.
3881 *
3882 * The caller must hold the rtnl_mutex.
3883 */
3884int dev_addr_del_multiple(struct net_device *to_dev,
3885 struct net_device *from_dev,
3886 unsigned char addr_type)
3887{
3888 ASSERT_RTNL();
3889
3890 if (from_dev->addr_len != to_dev->addr_len)
3891 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003892 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad22009-05-22 23:22:17 +00003893 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003894 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3895 return 0;
3896}
3897EXPORT_SYMBOL(dev_addr_del_multiple);
3898
Jiri Pirko31278e72009-06-17 01:12:19 +00003899/* multicast addresses handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00003900
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003901int __dev_addr_delete(struct dev_addr_list **list, int *count,
3902 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003903{
3904 struct dev_addr_list *da;
3905
3906 for (; (da = *list) != NULL; list = &da->next) {
3907 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3908 alen == da->da_addrlen) {
3909 if (glbl) {
3910 int old_glbl = da->da_gusers;
3911 da->da_gusers = 0;
3912 if (old_glbl == 0)
3913 break;
3914 }
3915 if (--da->da_users)
3916 return 0;
3917
3918 *list = da->next;
3919 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003920 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003921 return 0;
3922 }
3923 }
3924 return -ENOENT;
3925}
3926
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003927int __dev_addr_add(struct dev_addr_list **list, int *count,
3928 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003929{
3930 struct dev_addr_list *da;
3931
3932 for (da = *list; da != NULL; da = da->next) {
3933 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3934 da->da_addrlen == alen) {
3935 if (glbl) {
3936 int old_glbl = da->da_gusers;
3937 da->da_gusers = 1;
3938 if (old_glbl)
3939 return 0;
3940 }
3941 da->da_users++;
3942 return 0;
3943 }
3944 }
3945
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003946 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003947 if (da == NULL)
3948 return -ENOMEM;
3949 memcpy(da->da_addr, addr, alen);
3950 da->da_addrlen = alen;
3951 da->da_users = 1;
3952 da->da_gusers = glbl ? 1 : 0;
3953 da->next = *list;
3954 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003955 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003956 return 0;
3957}
3958
Patrick McHardy4417da62007-06-27 01:28:10 -07003959/**
3960 * dev_unicast_delete - Release secondary unicast address.
3961 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003962 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07003963 *
3964 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003965 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003966 *
3967 * The caller must hold the rtnl_mutex.
3968 */
Jiri Pirkoccffad22009-05-22 23:22:17 +00003969int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003970{
3971 int err;
3972
3973 ASSERT_RTNL();
3974
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003975 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00003976 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3977 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003978 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003979 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003980 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003981 return err;
3982}
3983EXPORT_SYMBOL(dev_unicast_delete);
3984
3985/**
3986 * dev_unicast_add - add a secondary unicast address
3987 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003988 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07003989 *
3990 * Add a secondary unicast address to the device or increase
3991 * the reference count if it already exists.
3992 *
3993 * The caller must hold the rtnl_mutex.
3994 */
Jiri Pirkoccffad22009-05-22 23:22:17 +00003995int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003996{
3997 int err;
3998
3999 ASSERT_RTNL();
4000
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004001 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004002 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4003 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07004004 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07004005 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004006 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07004007 return err;
4008}
4009EXPORT_SYMBOL(dev_unicast_add);
4010
Chris Leeche83a2ea2008-01-31 16:53:23 -08004011int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4012 struct dev_addr_list **from, int *from_count)
4013{
4014 struct dev_addr_list *da, *next;
4015 int err = 0;
4016
4017 da = *from;
4018 while (da != NULL) {
4019 next = da->next;
4020 if (!da->da_synced) {
4021 err = __dev_addr_add(to, to_count,
4022 da->da_addr, da->da_addrlen, 0);
4023 if (err < 0)
4024 break;
4025 da->da_synced = 1;
4026 da->da_users++;
4027 } else if (da->da_users == 1) {
4028 __dev_addr_delete(to, to_count,
4029 da->da_addr, da->da_addrlen, 0);
4030 __dev_addr_delete(from, from_count,
4031 da->da_addr, da->da_addrlen, 0);
4032 }
4033 da = next;
4034 }
4035 return err;
4036}
Johannes Bergc4029082009-06-17 17:43:30 +02004037EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004038
4039void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4040 struct dev_addr_list **from, int *from_count)
4041{
4042 struct dev_addr_list *da, *next;
4043
4044 da = *from;
4045 while (da != NULL) {
4046 next = da->next;
4047 if (da->da_synced) {
4048 __dev_addr_delete(to, to_count,
4049 da->da_addr, da->da_addrlen, 0);
4050 da->da_synced = 0;
4051 __dev_addr_delete(from, from_count,
4052 da->da_addr, da->da_addrlen, 0);
4053 }
4054 da = next;
4055 }
4056}
Johannes Bergc4029082009-06-17 17:43:30 +02004057EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004058
4059/**
4060 * dev_unicast_sync - Synchronize device's unicast list to another device
4061 * @to: destination device
4062 * @from: source device
4063 *
4064 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004065 * addresses that have no users left. The source device must be
4066 * locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08004067 *
4068 * This function is intended to be called from the dev->set_rx_mode
4069 * function of layered software devices.
4070 */
4071int dev_unicast_sync(struct net_device *to, struct net_device *from)
4072{
4073 int err = 0;
4074
Jiri Pirkoccffad22009-05-22 23:22:17 +00004075 if (to->addr_len != from->addr_len)
4076 return -EINVAL;
4077
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004078 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004079 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004080 if (!err)
4081 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004082 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004083 return err;
4084}
4085EXPORT_SYMBOL(dev_unicast_sync);
4086
4087/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004088 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004089 * @to: destination device
4090 * @from: source device
4091 *
4092 * Remove all addresses that were added to the destination device by
4093 * dev_unicast_sync(). This function is intended to be called from the
4094 * dev->stop function of layered software devices.
4095 */
4096void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4097{
Jiri Pirkoccffad22009-05-22 23:22:17 +00004098 if (to->addr_len != from->addr_len)
4099 return;
4100
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004101 netif_addr_lock_bh(from);
4102 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004103 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004104 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004105 netif_addr_unlock(to);
4106 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004107}
4108EXPORT_SYMBOL(dev_unicast_unsync);
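/*
 * Illustrative sketch of the intended calling pattern (hypothetical layered
 * driver, not part of this file): the upper device pushes its unicast list
 * down from its rx-mode callback and removes it again on stop.  "lower" is
 * an assumed pointer to the underlying real device.
 */
static void example_upper_set_rx_mode(struct net_device *upper,
				      struct net_device *lower)
{
	/* the core calls set_rx_mode with upper's address list lock held */
	dev_unicast_sync(lower, upper);
}

static void example_upper_stop(struct net_device *upper,
			       struct net_device *lower)
{
	dev_unicast_unsync(lower, upper);
}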
4109
Jiri Pirkoccffad22009-05-22 23:22:17 +00004110static void dev_unicast_flush(struct net_device *dev)
4111{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004112 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004113 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004114 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad22009-05-22 23:22:17 +00004115}
4116
4117static void dev_unicast_init(struct net_device *dev)
4118{
Jiri Pirko31278e72009-06-17 01:12:19 +00004119 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad22009-05-22 23:22:17 +00004120}
4121
4122
Denis Cheng12972622007-07-18 02:12:56 -07004123static void __dev_addr_discard(struct dev_addr_list **list)
4124{
4125 struct dev_addr_list *tmp;
4126
4127 while (*list != NULL) {
4128 tmp = *list;
4129 *list = tmp->next;
4130 if (tmp->da_users > tmp->da_gusers)
4131 printk("__dev_addr_discard: address leakage! "
4132 "da_users=%d\n", tmp->da_users);
4133 kfree(tmp);
4134 }
4135}
4136
Denis Cheng26cc2522007-07-18 02:12:03 -07004137static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004138{
David S. Millerb9e40852008-07-15 00:15:08 -07004139 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004140
Denis Cheng456ad752007-07-18 02:10:54 -07004141 __dev_addr_discard(&dev->mc_list);
4142 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004143
David S. Millerb9e40852008-07-15 00:15:08 -07004144 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004145}
4146
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004147/**
4148 * dev_get_flags - get flags reported to userspace
4149 * @dev: device
4150 *
4151 * Get the combination of flag bits exported through APIs to userspace.
4152 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153unsigned dev_get_flags(const struct net_device *dev)
4154{
4155 unsigned flags;
4156
4157 flags = (dev->flags & ~(IFF_PROMISC |
4158 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004159 IFF_RUNNING |
4160 IFF_LOWER_UP |
4161 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162 (dev->gflags & (IFF_PROMISC |
4163 IFF_ALLMULTI));
4164
Stefan Rompfb00055a2006-03-20 17:09:11 -08004165 if (netif_running(dev)) {
4166 if (netif_oper_up(dev))
4167 flags |= IFF_RUNNING;
4168 if (netif_carrier_ok(dev))
4169 flags |= IFF_LOWER_UP;
4170 if (netif_dormant(dev))
4171 flags |= IFF_DORMANT;
4172 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004173
4174 return flags;
4175}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004176EXPORT_SYMBOL(dev_get_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004178/**
4179 * dev_change_flags - change device settings
4180 * @dev: device
4181 * @flags: device state flags
4182 *
4183 * Change settings on the device according to the supplied state flags. The flags are
4184 * in the userspace exported format.
4185 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186int dev_change_flags(struct net_device *dev, unsigned flags)
4187{
Thomas Graf7c355f52007-06-05 16:03:03 -07004188 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004189 int old_flags = dev->flags;
4190
Patrick McHardy24023452007-07-14 18:51:31 -07004191 ASSERT_RTNL();
4192
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193 /*
4194 * Set the flags on our device.
4195 */
4196
4197 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4198 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4199 IFF_AUTOMEDIA)) |
4200 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4201 IFF_ALLMULTI));
4202
4203 /*
4204 * Load in the correct multicast list now the flags have changed.
4205 */
4206
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004207 if ((old_flags ^ flags) & IFF_MULTICAST)
4208 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004209
Patrick McHardy4417da62007-06-27 01:28:10 -07004210 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004211
4212 /*
4213 * Have we downed the interface? We handle IFF_UP ourselves
4214 * according to user attempts to set it, rather than blindly
4215 * setting it.
4216 */
4217
4218 ret = 0;
4219 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4220 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4221
4222 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004223 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224 }
4225
4226 if (dev->flags & IFF_UP &&
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004227 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004229 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230
4231 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004232 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4233
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234 dev->gflags ^= IFF_PROMISC;
4235 dev_set_promiscuity(dev, inc);
4236 }
4237
4238 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4239 is important. Some (broken) drivers set IFF_PROMISC when
4240 IFF_ALLMULTI is requested, without asking us and without reporting it.
4241 */
4242 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004243 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4244
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245 dev->gflags ^= IFF_ALLMULTI;
4246 dev_set_allmulti(dev, inc);
4247 }
4248
Thomas Graf7c355f52007-06-05 16:03:03 -07004249 /* Exclude state transition flags, already notified */
4250 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4251 if (changes)
4252 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253
4254 return ret;
4255}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004256EXPORT_SYMBOL(dev_change_flags);
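/*
 * Illustrative sketch (not part of this file): bringing an interface up
 * from kernel code using the userspace-format flags, under the RTNL as
 * required by the ASSERT_RTNL() above.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}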
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004258/**
4259 * dev_set_mtu - Change maximum transfer unit
4260 * @dev: device
4261 * @new_mtu: new transfer unit
4262 *
4263 * Change the maximum transfer size of the network device.
4264 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004265int dev_set_mtu(struct net_device *dev, int new_mtu)
4266{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004267 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268 int err;
4269
4270 if (new_mtu == dev->mtu)
4271 return 0;
4272
4273 /* MTU must be positive. */
4274 if (new_mtu < 0)
4275 return -EINVAL;
4276
4277 if (!netif_device_present(dev))
4278 return -ENODEV;
4279
4280 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004281 if (ops->ndo_change_mtu)
4282 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283 else
4284 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004285
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004287 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288 return err;
4289}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004290EXPORT_SYMBOL(dev_set_mtu);
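/*
 * Illustrative sketch (not part of this file): switching a device to a
 * jumbo MTU.  The value 9000 is only an example; the underlying driver may
 * still reject it from its ndo_change_mtu callback.
 */
static int example_enable_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	return err;
}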
Linus Torvalds1da177e2005-04-16 15:20:36 -07004291
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004292/**
4293 * dev_set_mac_address - Change Media Access Control Address
4294 * @dev: device
4295 * @sa: new address
4296 *
4297 * Change the hardware (MAC) address of the device
4298 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004299int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4300{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004301 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302 int err;
4303
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004304 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305 return -EOPNOTSUPP;
4306 if (sa->sa_family != dev->type)
4307 return -EINVAL;
4308 if (!netif_device_present(dev))
4309 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004310 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004312 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313 return err;
4314}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004315EXPORT_SYMBOL(dev_set_mac_address);
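/*
 * Illustrative sketch (not part of this file): building the struct sockaddr
 * that dev_set_mac_address() expects.  Assumes an Ethernet-sized hardware
 * address so it fits within sa_data.
 */
static int example_set_mac(struct net_device *dev, const unsigned char *mac)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, dev->addr_len);
	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}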
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316
4317/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07004318 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004320static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321{
4322 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004323 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324
4325 if (!dev)
4326 return -ENODEV;
4327
4328 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004329 case SIOCGIFFLAGS: /* Get interface flags */
4330 ifr->ifr_flags = (short) dev_get_flags(dev);
4331 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004333 case SIOCGIFMETRIC: /* Get the metric on the interface
4334 (currently unused) */
4335 ifr->ifr_metric = 0;
4336 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004338 case SIOCGIFMTU: /* Get the MTU of a device */
4339 ifr->ifr_mtu = dev->mtu;
4340 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004341
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004342 case SIOCGIFHWADDR:
4343 if (!dev->addr_len)
4344 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4345 else
4346 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4347 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4348 ifr->ifr_hwaddr.sa_family = dev->type;
4349 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004351 case SIOCGIFSLAVE:
4352 err = -EINVAL;
4353 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004354
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004355 case SIOCGIFMAP:
4356 ifr->ifr_map.mem_start = dev->mem_start;
4357 ifr->ifr_map.mem_end = dev->mem_end;
4358 ifr->ifr_map.base_addr = dev->base_addr;
4359 ifr->ifr_map.irq = dev->irq;
4360 ifr->ifr_map.dma = dev->dma;
4361 ifr->ifr_map.port = dev->if_port;
4362 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004363
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004364 case SIOCGIFINDEX:
4365 ifr->ifr_ifindex = dev->ifindex;
4366 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004367
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004368 case SIOCGIFTXQLEN:
4369 ifr->ifr_qlen = dev->tx_queue_len;
4370 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004371
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004372 default:
4373 /* dev_ioctl() should ensure this case
4374 * is never reached
4375 */
4376 WARN_ON(1);
4377 err = -EINVAL;
4378 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004379
4380 }
4381 return err;
4382}
4383
4384/*
4385 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4386 */
4387static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4388{
4389 int err;
4390 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004391 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004392
4393 if (!dev)
4394 return -ENODEV;
4395
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004396 ops = dev->netdev_ops;
4397
Jeff Garzik14e3e072007-10-08 00:06:32 -07004398 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004399 case SIOCSIFFLAGS: /* Set interface flags */
4400 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004401
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004402 case SIOCSIFMETRIC: /* Set the metric on the interface
4403 (currently unused) */
4404 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004405
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004406 case SIOCSIFMTU: /* Set the MTU of a device */
4407 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004408
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004409 case SIOCSIFHWADDR:
4410 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004411
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004412 case SIOCSIFHWBROADCAST:
4413 if (ifr->ifr_hwaddr.sa_family != dev->type)
4414 return -EINVAL;
4415 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4416 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4417 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4418 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004419
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004420 case SIOCSIFMAP:
4421 if (ops->ndo_set_config) {
4422 if (!netif_device_present(dev))
4423 return -ENODEV;
4424 return ops->ndo_set_config(dev, &ifr->ifr_map);
4425 }
4426 return -EOPNOTSUPP;
4427
4428 case SIOCADDMULTI:
4429 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4430 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4431 return -EINVAL;
4432 if (!netif_device_present(dev))
4433 return -ENODEV;
4434 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4435 dev->addr_len, 1);
4436
4437 case SIOCDELMULTI:
4438 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4439 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4440 return -EINVAL;
4441 if (!netif_device_present(dev))
4442 return -ENODEV;
4443 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4444 dev->addr_len, 1);
4445
4446 case SIOCSIFTXQLEN:
4447 if (ifr->ifr_qlen < 0)
4448 return -EINVAL;
4449 dev->tx_queue_len = ifr->ifr_qlen;
4450 return 0;
4451
4452 case SIOCSIFNAME:
4453 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4454 return dev_change_name(dev, ifr->ifr_newname);
4455
4456 /*
4457 * Unknown or private ioctl
4458 */
4459 default:
4460 if ((cmd >= SIOCDEVPRIVATE &&
4461 cmd <= SIOCDEVPRIVATE + 15) ||
4462 cmd == SIOCBONDENSLAVE ||
4463 cmd == SIOCBONDRELEASE ||
4464 cmd == SIOCBONDSETHWADDR ||
4465 cmd == SIOCBONDSLAVEINFOQUERY ||
4466 cmd == SIOCBONDINFOQUERY ||
4467 cmd == SIOCBONDCHANGEACTIVE ||
4468 cmd == SIOCGMIIPHY ||
4469 cmd == SIOCGMIIREG ||
4470 cmd == SIOCSMIIREG ||
4471 cmd == SIOCBRADDIF ||
4472 cmd == SIOCBRDELIF ||
4473 cmd == SIOCSHWTSTAMP ||
4474 cmd == SIOCWANDEV) {
4475 err = -EOPNOTSUPP;
4476 if (ops->ndo_do_ioctl) {
4477 if (netif_device_present(dev))
4478 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4479 else
4480 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004481 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004482 } else
4483 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484
4485 }
4486 return err;
4487}
4488
4489/*
4490 * This function handles all "interface"-type I/O control requests. The actual
4491 * 'doing' part of this is dev_ifsioc above.
4492 */
4493
4494/**
4495 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004496 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497 * @cmd: command to issue
4498 * @arg: pointer to a struct ifreq in user space
4499 *
4500 * Issue ioctl functions to devices. This is normally called by the
4501 * user space syscall interfaces but can sometimes be useful for
4502 * other purposes. The return value is the return from the syscall if
4503 * positive or a negative errno code on error.
4504 */
4505
Eric W. Biederman881d9662007-09-17 11:56:21 -07004506int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507{
4508 struct ifreq ifr;
4509 int ret;
4510 char *colon;
4511
4512 /* One special case: SIOCGIFCONF takes ifconf argument
4513 and requires shared lock, because it sleeps writing
4514 to user space.
4515 */
4516
4517 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004518 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004519 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004520 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004521 return ret;
4522 }
4523 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004524 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004525
4526 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4527 return -EFAULT;
4528
4529 ifr.ifr_name[IFNAMSIZ-1] = 0;
4530
4531 colon = strchr(ifr.ifr_name, ':');
4532 if (colon)
4533 *colon = 0;
4534
4535 /*
4536 * See which interface the caller is talking about.
4537 */
4538
4539 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004540 /*
4541 * These ioctl calls:
4542 * - can be done by all.
4543 * - atomic and do not require locking.
4544 * - return a value
4545 */
4546 case SIOCGIFFLAGS:
4547 case SIOCGIFMETRIC:
4548 case SIOCGIFMTU:
4549 case SIOCGIFHWADDR:
4550 case SIOCGIFSLAVE:
4551 case SIOCGIFMAP:
4552 case SIOCGIFINDEX:
4553 case SIOCGIFTXQLEN:
4554 dev_load(net, ifr.ifr_name);
4555 read_lock(&dev_base_lock);
4556 ret = dev_ifsioc_locked(net, &ifr, cmd);
4557 read_unlock(&dev_base_lock);
4558 if (!ret) {
4559 if (colon)
4560 *colon = ':';
4561 if (copy_to_user(arg, &ifr,
4562 sizeof(struct ifreq)))
4563 ret = -EFAULT;
4564 }
4565 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004566
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004567 case SIOCETHTOOL:
4568 dev_load(net, ifr.ifr_name);
4569 rtnl_lock();
4570 ret = dev_ethtool(net, &ifr);
4571 rtnl_unlock();
4572 if (!ret) {
4573 if (colon)
4574 *colon = ':';
4575 if (copy_to_user(arg, &ifr,
4576 sizeof(struct ifreq)))
4577 ret = -EFAULT;
4578 }
4579 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004581 /*
4582 * These ioctl calls:
4583 * - require superuser power.
4584 * - require strict serialization.
4585 * - return a value
4586 */
4587 case SIOCGMIIPHY:
4588 case SIOCGMIIREG:
4589 case SIOCSIFNAME:
4590 if (!capable(CAP_NET_ADMIN))
4591 return -EPERM;
4592 dev_load(net, ifr.ifr_name);
4593 rtnl_lock();
4594 ret = dev_ifsioc(net, &ifr, cmd);
4595 rtnl_unlock();
4596 if (!ret) {
4597 if (colon)
4598 *colon = ':';
4599 if (copy_to_user(arg, &ifr,
4600 sizeof(struct ifreq)))
4601 ret = -EFAULT;
4602 }
4603 return ret;
4604
4605 /*
4606 * These ioctl calls:
4607 * - require superuser power.
4608 * - require strict serialization.
4609 * - do not return a value
4610 */
4611 case SIOCSIFFLAGS:
4612 case SIOCSIFMETRIC:
4613 case SIOCSIFMTU:
4614 case SIOCSIFMAP:
4615 case SIOCSIFHWADDR:
4616 case SIOCSIFSLAVE:
4617 case SIOCADDMULTI:
4618 case SIOCDELMULTI:
4619 case SIOCSIFHWBROADCAST:
4620 case SIOCSIFTXQLEN:
4621 case SIOCSMIIREG:
4622 case SIOCBONDENSLAVE:
4623 case SIOCBONDRELEASE:
4624 case SIOCBONDSETHWADDR:
4625 case SIOCBONDCHANGEACTIVE:
4626 case SIOCBRADDIF:
4627 case SIOCBRDELIF:
4628 case SIOCSHWTSTAMP:
4629 if (!capable(CAP_NET_ADMIN))
4630 return -EPERM;
4631 /* fall through */
4632 case SIOCBONDSLAVEINFOQUERY:
4633 case SIOCBONDINFOQUERY:
4634 dev_load(net, ifr.ifr_name);
4635 rtnl_lock();
4636 ret = dev_ifsioc(net, &ifr, cmd);
4637 rtnl_unlock();
4638 return ret;
4639
4640 case SIOCGIFMEM:
4641 /* Get the per device memory space. We can add this but
4642 * currently do not support it */
4643 case SIOCSIFMEM:
4644 /* Set the per device memory buffer space.
4645 * Not applicable in our case */
4646 case SIOCSIFLINK:
4647 return -EINVAL;
4648
4649 /*
4650 * Unknown or private ioctl.
4651 */
4652 default:
4653 if (cmd == SIOCWANDEV ||
4654 (cmd >= SIOCDEVPRIVATE &&
4655 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004656 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004657 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004658 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004659 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004660 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004662 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004664 }
4665 /* Take care of Wireless Extensions */
4666 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4667 return wext_handle_ioctl(net, &ifr, cmd, arg);
4668 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669 }
4670}
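/*
 * Illustrative userspace counterpart (not kernel code, kept out of the
 * build): roughly how an SIOCGIFMTU request reaches dev_ioctl() above.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int print_mtu(const char *ifname)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("%s mtu %d\n", ifname, ifr.ifr_mtu);
	close(fd);
	return 0;
}
#endif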
4671
4672
4673/**
4674 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004675 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004676 *
4677 * Returns a suitable unique value for a new device interface
4678 * number. The caller must hold the rtnl semaphore or the
4679 * dev_base_lock to be sure it remains unique.
4680 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004681static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682{
4683 static int ifindex;
4684 for (;;) {
4685 if (++ifindex <= 0)
4686 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004687 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004688 return ifindex;
4689 }
4690}
4691
Linus Torvalds1da177e2005-04-16 15:20:36 -07004692/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004693static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004695static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004698}
4699
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004700static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004701{
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004702 struct net_device *dev;
4703
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004704 BUG_ON(dev_boot_phase);
4705 ASSERT_RTNL();
4706
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004707 list_for_each_entry(dev, head, unreg_list) {
4708 /* Some devices call without registering
4709 * for initialization unwind.
4710 */
4711 if (dev->reg_state == NETREG_UNINITIALIZED) {
4712 pr_debug("unregister_netdevice: device %s/%p never "
4713 "was registered\n", dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004714
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004715 WARN_ON(1);
4716 return;
4717 }
4718
4719 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4720
4721 /* If device is running, close it first. */
4722 dev_close(dev);
4723
4724 /* And unlink it from device chain. */
4725 unlist_netdevice(dev);
4726
4727 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004728 }
4729
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004730 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004731
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004732 list_for_each_entry(dev, head, unreg_list) {
4733 /* Shutdown queueing discipline. */
4734 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004735
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004736
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004737 /* Notify protocols, that we are about to destroy
4738 this device. They should clean all the things.
4739 */
4740 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4741
4742 /*
4743 * Flush the unicast and multicast chains
4744 */
4745 dev_unicast_flush(dev);
4746 dev_addr_discard(dev);
4747
4748 if (dev->netdev_ops->ndo_uninit)
4749 dev->netdev_ops->ndo_uninit(dev);
4750
4751 /* Notifier chain MUST detach us from master device. */
4752 WARN_ON(dev->master);
4753
4754 /* Remove entries from kobject tree */
4755 netdev_unregister_kobject(dev);
4756 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004757
4758 synchronize_net();
4759
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004760 list_for_each_entry(dev, head, unreg_list)
4761 dev_put(dev);
4762}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004763
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004764static void rollback_registered(struct net_device *dev)
4765{
4766 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004767
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004768 list_add(&dev->unreg_list, &single);
4769 rollback_registered_many(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004770}
4771
David S. Millere8a04642008-07-17 00:34:19 -07004772static void __netdev_init_queue_locks_one(struct net_device *dev,
4773 struct netdev_queue *dev_queue,
4774 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004775{
4776 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004777 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004778 dev_queue->xmit_lock_owner = -1;
4779}
4780
4781static void netdev_init_queue_locks(struct net_device *dev)
4782{
David S. Millere8a04642008-07-17 00:34:19 -07004783 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4784 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004785}
4786
Herbert Xub63365a2008-10-23 01:11:29 -07004787unsigned long netdev_fix_features(unsigned long features, const char *name)
4788{
4789 /* Fix illegal SG+CSUM combinations. */
4790 if ((features & NETIF_F_SG) &&
4791 !(features & NETIF_F_ALL_CSUM)) {
4792 if (name)
4793 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4794 "checksum feature.\n", name);
4795 features &= ~NETIF_F_SG;
4796 }
4797
4798 /* TSO requires that SG is present as well. */
4799 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4800 if (name)
4801 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4802 "SG feature.\n", name);
4803 features &= ~NETIF_F_TSO;
4804 }
4805
4806 if (features & NETIF_F_UFO) {
4807 if (!(features & NETIF_F_GEN_CSUM)) {
4808 if (name)
4809 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4810 "since no NETIF_F_HW_CSUM feature.\n",
4811 name);
4812 features &= ~NETIF_F_UFO;
4813 }
4814
4815 if (!(features & NETIF_F_SG)) {
4816 if (name)
4817 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4818 "since no NETIF_F_SG feature.\n", name);
4819 features &= ~NETIF_F_UFO;
4820 }
4821 }
4822
4823 return features;
4824}
4825EXPORT_SYMBOL(netdev_fix_features);
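/*
 * Illustrative sketch (not part of this file): a hypothetical driver probe
 * sanitizing the feature bits it wants to advertise, so the SG/CSUM/TSO/UFO
 * dependencies enforced above stay consistent.
 */
static void example_init_features(struct net_device *dev)
{
	unsigned long wanted = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;

	dev->features = netdev_fix_features(wanted, dev->name);
}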
4826
Linus Torvalds1da177e2005-04-16 15:20:36 -07004827/**
4828 * register_netdevice - register a network device
4829 * @dev: device to register
4830 *
4831 * Take a completed network device structure and add it to the kernel
4832 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4833 * chain. 0 is returned on success. A negative errno code is returned
4834 * on a failure to set up the device, or if the name is a duplicate.
4835 *
4836 * Callers must hold the rtnl semaphore. You may want
4837 * register_netdev() instead of this.
4838 *
4839 * BUGS:
4840 * The locking appears insufficient to guarantee two parallel registers
4841 * will not get the same name.
4842 */
4843
4844int register_netdevice(struct net_device *dev)
4845{
4846 struct hlist_head *head;
4847 struct hlist_node *p;
4848 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004849 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004850
4851 BUG_ON(dev_boot_phase);
4852 ASSERT_RTNL();
4853
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004854 might_sleep();
4855
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856 /* When net_device's are persistent, this will be fatal. */
4857 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004858 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859
David S. Millerf1f28aa2008-07-15 00:08:33 -07004860 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004861 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004862 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863
Linus Torvalds1da177e2005-04-16 15:20:36 -07004864 dev->iflink = -1;
4865
4866 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004867 if (dev->netdev_ops->ndo_init) {
4868 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869 if (ret) {
4870 if (ret > 0)
4871 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004872 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004873 }
4874 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004875
Linus Torvalds1da177e2005-04-16 15:20:36 -07004876 if (!dev_valid_name(dev->name)) {
4877 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004878 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004879 }
4880
Eric W. Biederman881d9662007-09-17 11:56:21 -07004881 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004882 if (dev->iflink == -1)
4883 dev->iflink = dev->ifindex;
4884
4885 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004886 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004887 hlist_for_each(p, head) {
4888 struct net_device *d
4889 = hlist_entry(p, struct net_device, name_hlist);
4890 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4891 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004892 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004893 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004894 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004896 /* Fix illegal checksum combinations */
4897 if ((dev->features & NETIF_F_HW_CSUM) &&
4898 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4899 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4900 dev->name);
4901 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4902 }
4903
4904 if ((dev->features & NETIF_F_NO_CSUM) &&
4905 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4906 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4907 dev->name);
4908 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4909 }
4910
Herbert Xub63365a2008-10-23 01:11:29 -07004911 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004912
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004913 /* Enable software GSO if SG is supported. */
4914 if (dev->features & NETIF_F_SG)
4915 dev->features |= NETIF_F_GSO;
4916
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004917 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00004918
4919 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4920 ret = notifier_to_errno(ret);
4921 if (ret)
4922 goto err_uninit;
4923
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004924 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004925 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004926 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004927 dev->reg_state = NETREG_REGISTERED;
4928
Linus Torvalds1da177e2005-04-16 15:20:36 -07004929 /*
4930 * Default initial state at registry is that the
4931 * device is present.
4932 */
4933
4934 set_bit(__LINK_STATE_PRESENT, &dev->state);
4935
Linus Torvalds1da177e2005-04-16 15:20:36 -07004936 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004937 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004938 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004939
4940 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004941 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004942 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004943 if (ret) {
4944 rollback_registered(dev);
4945 dev->reg_state = NETREG_UNREGISTERED;
4946 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004947
4948out:
4949 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004950
4951err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004952 if (dev->netdev_ops->ndo_uninit)
4953 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004954 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004955}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004956EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004957
4958/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004959 * init_dummy_netdev - init a dummy network device for NAPI
4960 * @dev: device to init
4961 *
4962 * This takes a network device structure and initializes the minimum
4963 * number of fields so it can be used to schedule NAPI polls without
4964 * registering a full-blown interface. This is to be used by drivers
4965 * that need to tie several hardware interfaces to a single NAPI
4966 * poll scheduler due to HW limitations.
4967 */
4968int init_dummy_netdev(struct net_device *dev)
4969{
4970 /* Clear everything. Note we don't initialize spinlocks
4971 * as they aren't supposed to be taken by any of the
4972 * NAPI code and this dummy netdev is supposed to be
4973 * only ever used for NAPI polls
4974 */
4975 memset(dev, 0, sizeof(struct net_device));
4976
4977 /* make sure we BUG if trying to hit standard
4978 * register/unregister code path
4979 */
4980 dev->reg_state = NETREG_DUMMY;
4981
4982 /* initialize the ref count */
4983 atomic_set(&dev->refcnt, 1);
4984
4985 /* NAPI wants this */
4986 INIT_LIST_HEAD(&dev->napi_list);
4987
4988 /* a dummy interface is started by default */
4989 set_bit(__LINK_STATE_PRESENT, &dev->state);
4990 set_bit(__LINK_STATE_START, &dev->state);
4991
4992 return 0;
4993}
4994EXPORT_SYMBOL_GPL(init_dummy_netdev);
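/*
 * Illustrative sketch (hypothetical driver, not part of this file): using a
 * dummy netdev so several hardware channels can share one NAPI context.
 * The adapter struct and poll routine are assumptions for the example.
 */
struct example_adapter {
	struct net_device napi_dev;	/* never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	/* a real driver would clean its rings here and honour the budget */
	napi_complete(napi);
	return 0;
}

static void example_setup_napi(struct example_adapter *ad)
{
	init_dummy_netdev(&ad->napi_dev);
	netif_napi_add(&ad->napi_dev, &ad->napi, example_poll, 64);
	napi_enable(&ad->napi);
}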
4995
4996
4997/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004998 * register_netdev - register a network device
4999 * @dev: device to register
5000 *
5001 * Take a completed network device structure and add it to the kernel
5002 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5003 * chain. 0 is returned on success. A negative errno code is returned
5004 * on a failure to set up the device, or if the name is a duplicate.
5005 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07005006 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07005007 * and expands the device name if you passed a format string to
5008 * alloc_netdev.
5009 */
5010int register_netdev(struct net_device *dev)
5011{
5012 int err;
5013
5014 rtnl_lock();
5015
5016 /*
5017 * If the name is a format string the caller wants us to do a
5018 * name allocation.
5019 */
5020 if (strchr(dev->name, '%')) {
5021 err = dev_alloc_name(dev, dev->name);
5022 if (err < 0)
5023 goto out;
5024 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005025
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026 err = register_netdevice(dev);
5027out:
5028 rtnl_unlock();
5029 return err;
5030}
5031EXPORT_SYMBOL(register_netdev);
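/*
 * Illustrative probe sketch (hypothetical driver, not part of this file):
 * the usual alloc_netdev()/register_netdev()/free_netdev() pairing.  The
 * private struct and setup callback are assumptions for the example.
 */
struct example_priv {
	int link_up;	/* stand-in for real driver state */
};

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);
}

static struct net_device *example_probe(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct example_priv), "exmp%d",
			   example_setup);
	if (!dev)
		return NULL;
	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}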
5032
5033/*
5034 * netdev_wait_allrefs - wait until all references are gone.
5035 *
5036 * This is called when unregistering network devices.
5037 *
5038 * Any protocol or device that holds a reference should register
5039 * for netdevice notification, and cleanup and put back the
5040 * reference if they receive an UNREGISTER event.
5041 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005042 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043 */
5044static void netdev_wait_allrefs(struct net_device *dev)
5045{
5046 unsigned long rebroadcast_time, warning_time;
5047
5048 rebroadcast_time = warning_time = jiffies;
5049 while (atomic_read(&dev->refcnt) != 0) {
5050 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005051 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005052
5053 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07005054 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055
5056 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5057 &dev->state)) {
5058 /* We must not have linkwatch events
5059 * pending on unregister. If this
5060 * happens, we simply run the queue
5061 * unscheduled, resulting in a noop
5062 * for this device.
5063 */
5064 linkwatch_run_queue();
5065 }
5066
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005067 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068
5069 rebroadcast_time = jiffies;
5070 }
5071
5072 msleep(250);
5073
5074 if (time_after(jiffies, warning_time + 10 * HZ)) {
5075 printk(KERN_EMERG "unregister_netdevice: "
5076 "waiting for %s to become free. Usage "
5077 "count = %d\n",
5078 dev->name, atomic_read(&dev->refcnt));
5079 warning_time = jiffies;
5080 }
5081 }
5082}
5083
5084/* The sequence is:
5085 *
5086 * rtnl_lock();
5087 * ...
5088 * register_netdevice(x1);
5089 * register_netdevice(x2);
5090 * ...
5091 * unregister_netdevice(y1);
5092 * unregister_netdevice(y2);
5093 * ...
5094 * rtnl_unlock();
5095 * free_netdev(y1);
5096 * free_netdev(y2);
5097 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005098 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005099 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005100 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005101 * without deadlocking with linkwatch via keventd.
5102 * 2) Since we run with the RTNL semaphore not held, we can sleep
5103 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005104 *
5105 * We must not return until all unregister events added during
5106 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005107 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005108void netdev_run_todo(void)
5109{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005110 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005111
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005113 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005114
5115 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005116
Linus Torvalds1da177e2005-04-16 15:20:36 -07005117 while (!list_empty(&list)) {
5118 struct net_device *dev
5119 = list_entry(list.next, struct net_device, todo_list);
5120 list_del(&dev->todo_list);
5121
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005122 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005123 printk(KERN_ERR "network todo '%s' but state %d\n",
5124 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005125 dump_stack();
5126 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005128
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005129 dev->reg_state = NETREG_UNREGISTERED;
5130
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005131 on_each_cpu(flush_backlog, dev, 1);
5132
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005133 netdev_wait_allrefs(dev);
5134
5135 /* paranoia */
5136 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005137 WARN_ON(dev->ip_ptr);
5138 WARN_ON(dev->ip6_ptr);
5139 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005140
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005141 if (dev->destructor)
5142 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005143
5144 /* Free network device */
5145 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005146 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147}
5148
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005149/**
5150 * dev_get_stats - get network device statistics
5151 * @dev: device to get statistics from
5152 *
5153 * Get network statistics from device. The device driver may provide
5154 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5155 * the internal statistics structure is used.
5156 */
5157const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005158{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005159 const struct net_device_ops *ops = dev->netdev_ops;
5160
5161 if (ops->ndo_get_stats)
5162 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005163 else {
5164 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5165 struct net_device_stats *stats = &dev->stats;
5166 unsigned int i;
5167 struct netdev_queue *txq;
5168
5169 for (i = 0; i < dev->num_tx_queues; i++) {
5170 txq = netdev_get_tx_queue(dev, i);
5171 tx_bytes += txq->tx_bytes;
5172 tx_packets += txq->tx_packets;
5173 tx_dropped += txq->tx_dropped;
5174 }
5175 if (tx_bytes || tx_packets || tx_dropped) {
5176 stats->tx_bytes = tx_bytes;
5177 stats->tx_packets = tx_packets;
5178 stats->tx_dropped = tx_dropped;
5179 }
5180 return stats;
5181 }
Rusty Russellc45d2862007-03-28 14:29:08 -07005182}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005183EXPORT_SYMBOL(dev_get_stats);
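/*
 * Illustrative sketch (hypothetical driver, not part of this file): a driver
 * that wants something other than the default accumulation above supplies
 * ndo_get_stats; folding hardware counters into dev->stats is typical.
 */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	/* a real driver would read hardware counters into dev->stats here */
	return &dev->stats;
}

static const struct net_device_ops example_stats_ops = {
	.ndo_get_stats	= example_get_stats,
};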
Rusty Russellc45d2862007-03-28 14:29:08 -07005184
David S. Millerdc2b4842008-07-08 17:18:23 -07005185static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005186 struct netdev_queue *queue,
5187 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005188{
David S. Millerdc2b4842008-07-08 17:18:23 -07005189 queue->dev = dev;
5190}
5191
David S. Millerbb949fb2008-07-08 16:55:56 -07005192static void netdev_init_queues(struct net_device *dev)
5193{
David S. Millere8a04642008-07-17 00:34:19 -07005194 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5195 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005196 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005197}
5198
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005200 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005201 * @sizeof_priv: size of private data to allocate space for
5202 * @name: device name format string
5203 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005204 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005205 *
5206 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005207 * and performs basic initialization. Also allocates subqueue structs
5208 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005209 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005210struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5211 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212{
David S. Millere8a04642008-07-17 00:34:19 -07005213 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005215 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005216 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005217
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005218 BUG_ON(strlen(name) >= sizeof(dev->name));
5219
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005220 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005221 if (sizeof_priv) {
5222 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005223 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005224 alloc_size += sizeof_priv;
5225 }
5226 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005227 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005229 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005231 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005232 return NULL;
5233 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234
Stephen Hemminger79439862008-07-21 13:28:44 -07005235 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005236 if (!tx) {
5237 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5238 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005239 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005240 }
5241
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005242 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005243 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005244
5245 if (dev_addr_init(dev))
5246 goto free_tx;
5247
Jiri Pirkoccffad22009-05-22 23:22:17 +00005248 dev_unicast_init(dev);
5249
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005250 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005251
David S. Millere8a04642008-07-17 00:34:19 -07005252 dev->_tx = tx;
5253 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005254 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005255
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005256 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005257
David S. Millerbb949fb2008-07-08 16:55:56 -07005258 netdev_init_queues(dev);
5259
Herbert Xud565b0a2008-12-15 23:38:52 -08005260 INIT_LIST_HEAD(&dev->napi_list);
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005261 INIT_LIST_HEAD(&dev->unreg_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005262 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263 setup(dev);
5264 strcpy(dev->name, name);
5265 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005266
5267free_tx:
5268 kfree(tx);
5269
5270free_p:
5271 kfree(p);
5272 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005274EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275
5276/**
5277 * free_netdev - free network device
5278 * @dev: device
5279 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005280 * This function does the last stage of destroying an allocated device
5281 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282 * If this is the last reference then it will be freed.
5283 */
5284void free_netdev(struct net_device *dev)
5285{
Herbert Xud565b0a2008-12-15 23:38:52 -08005286 struct napi_struct *p, *n;
5287
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005288 release_net(dev_net(dev));
5289
David S. Millere8a04642008-07-17 00:34:19 -07005290 kfree(dev->_tx);
5291
Jiri Pirkof001fde2009-05-05 02:48:28 +00005292 /* Flush device addresses */
5293 dev_addr_flush(dev);
5294
Herbert Xud565b0a2008-12-15 23:38:52 -08005295 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5296 netif_napi_del(p);
5297
Stephen Hemminger3041a062006-05-26 13:25:24 -07005298 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299 if (dev->reg_state == NETREG_UNINITIALIZED) {
5300 kfree((char *)dev - dev->padded);
5301 return;
5302 }
5303
5304 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5305 dev->reg_state = NETREG_RELEASED;
5306
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005307 /* will free via device release */
5308 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005310EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005311
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005312/**
5313 * synchronize_net - Synchronize with packet receive processing
5314 *
5315 * Wait for packets currently being received to be done.
5316 * Does not block later packets from starting.
5317 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005318void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319{
5320 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005321 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005322}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005323EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005324
5325/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005326 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005327 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005328 * @head: list
5329 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005330 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005331 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005332 * If head is not NULL, the device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333 *
5334 * Callers must hold the rtnl semaphore. You may want
5335 * unregister_netdev() instead of this.
5336 */
5337
Eric Dumazet44a08732009-10-27 07:03:04 +00005338void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005339{
Herbert Xua6620712007-12-12 19:21:56 -08005340 ASSERT_RTNL();
5341
Eric Dumazet44a08732009-10-27 07:03:04 +00005342 if (head) {
Eric W. Biederman9fdce092009-10-30 14:51:13 +00005343 list_move_tail(&dev->unreg_list, head);
Eric Dumazet44a08732009-10-27 07:03:04 +00005344 } else {
5345 rollback_registered(dev);
5346 /* Finish processing unregister after unlock */
5347 net_set_todo(dev);
5348 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005349}
Eric Dumazet44a08732009-10-27 07:03:04 +00005350EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005351
5352/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005353 * unregister_netdevice_many - unregister many devices
5354 * @head: list of devices
5355 *
5356 */
5357void unregister_netdevice_many(struct list_head *head)
5358{
5359 struct net_device *dev;
5360
5361 if (!list_empty(head)) {
5362 rollback_registered_many(head);
5363 list_for_each_entry(dev, head, unreg_list)
5364 net_set_todo(dev);
5365 }
5366}
Eric Dumazet63c80992009-10-27 07:06:49 +00005367EXPORT_SYMBOL(unregister_netdevice_many);
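/*
 * Illustrative batching sketch (not part of this file): queueing several
 * devices and tearing them down in a single pass, which is the point of
 * the _queue/_many variants above.  "devs"/"count" are assumed caller state.
 */
static void example_destroy_all(struct net_device **devs, int count)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < count; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}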
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005368
5369/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005370 * unregister_netdev - remove device from the kernel
5371 * @dev: device
5372 *
5373 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005374 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005375 *
5376 * This is just a wrapper for unregister_netdevice that takes
5377 * the rtnl semaphore. In general you want to use this and not
5378 * unregister_netdevice.
5379 */
5380void unregister_netdev(struct net_device *dev)
5381{
5382 rtnl_lock();
5383 unregister_netdevice(dev);
5384 rtnl_unlock();
5385}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005386EXPORT_SYMBOL(unregister_netdev);
5387
Eric W. Biedermance286d32007-09-12 13:53:49 +02005388/**
5389 * dev_change_net_namespace - move device to a different network namespace
5390 * @dev: device
5391 * @net: network namespace
5392 * @pat: If not NULL name pattern to try if the current device name
5393 * is already taken in the destination network namespace.
5394 *
5395 * This function shuts down a device interface and moves it
5396 * to a new network namespace. On success 0 is returned, on
5397 * a failure a negative errno code is returned.
5398 *
5399 * Callers must hold the rtnl semaphore.
5400 */
5401
5402int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5403{
5404 char buf[IFNAMSIZ];
5405 const char *destname;
5406 int err;
5407
5408 ASSERT_RTNL();
5409
5410 /* Don't allow namespace local devices to be moved. */
5411 err = -EINVAL;
5412 if (dev->features & NETIF_F_NETNS_LOCAL)
5413 goto out;
5414
Eric W. Biederman38918452008-10-27 17:51:47 -07005415#ifdef CONFIG_SYSFS
5416 /* Don't allow real devices to be moved when sysfs
5417 * is enabled.
5418 */
5419 err = -EINVAL;
5420 if (dev->dev.parent)
5421 goto out;
5422#endif
5423
Eric W. Biedermance286d32007-09-12 13:53:49 +02005424 /* Ensure the device has been registered */
5425 err = -EINVAL;
5426 if (dev->reg_state != NETREG_REGISTERED)
5427 goto out;
5428
5429 /* Get out if there is nothing to do */
5430 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005431 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005432 goto out;
5433
5434 /* Pick the destination device name, and ensure
5435 * we can use it in the destination network namespace.
5436 */
5437 err = -EEXIST;
5438 destname = dev->name;
5439 if (__dev_get_by_name(net, destname)) {
5440 /* We get here if we can't use the current device name */
5441 if (!pat)
5442 goto out;
5443 if (!dev_valid_name(pat))
5444 goto out;
5445 if (strchr(pat, '%')) {
5446 if (__dev_alloc_name(net, pat, buf) < 0)
5447 goto out;
5448 destname = buf;
5449 } else
5450 destname = pat;
5451 if (__dev_get_by_name(net, destname))
5452 goto out;
5453 }
5454
5455 /*
5456 * And now do a mini version of register_netdevice and unregister_netdevice.
5457 */
5458
5459 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005460 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005461
5462 /* And unlink it from device chain */
5463 err = -ENODEV;
5464 unlist_netdevice(dev);
5465
5466 synchronize_net();
5467
5468 /* Shutdown queueing discipline. */
5469 dev_shutdown(dev);
5470
5471 /* Notify protocols, that we are about to destroy
5472 this device. They should clean all the things.
5473 */
5474 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5475
5476 /*
5477 * Flush the unicast and multicast chains
5478 */
Jiri Pirkoccffad22009-05-22 23:22:17 +00005479 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005480 dev_addr_discard(dev);
5481
Eric W. Biederman38918452008-10-27 17:51:47 -07005482 netdev_unregister_kobject(dev);
5483
Eric W. Biedermance286d32007-09-12 13:53:49 +02005484 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005485 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005486
5487 /* Assign the new device name */
5488 if (destname != dev->name)
5489 strcpy(dev->name, destname);
5490
	5491	/* If there is an ifindex conflict, assign a new one */
5492 if (__dev_get_by_index(net, dev->ifindex)) {
5493 int iflink = (dev->iflink == dev->ifindex);
5494 dev->ifindex = dev_new_index(net);
5495 if (iflink)
5496 dev->iflink = dev->ifindex;
5497 }
5498
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005499 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005500 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005501 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005502
5503 /* Add the device back in the hashes */
5504 list_netdevice(dev);
5505
5506 /* Notify protocols, that a new device appeared. */
5507 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5508
5509 synchronize_net();
5510 err = 0;
5511out:
5512 return err;
5513}
Johannes Berg463d0182009-07-14 00:33:35 +02005514EXPORT_SYMBOL_GPL(dev_change_net_namespace);
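/*
 * Illustrative caller sketch (not part of dev.c): moving a device must
 * happen under the rtnl lock, and a "%d" pattern lets the core pick a free
 * name if the current one is already taken in the target namespace.
 * "target_net" is a hypothetical struct net the caller already holds.
 */
static int example_move_to_namespace(struct net_device *dev,
				     struct net *target_net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target_net, "eth%d");
	rtnl_unlock();

	return err;
}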
Eric W. Biedermance286d32007-09-12 13:53:49 +02005515
Linus Torvalds1da177e2005-04-16 15:20:36 -07005516static int dev_cpu_callback(struct notifier_block *nfb,
5517 unsigned long action,
5518 void *ocpu)
5519{
5520 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005521 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005522 struct sk_buff *skb;
5523 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5524 struct softnet_data *sd, *oldsd;
5525
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005526 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005527 return NOTIFY_OK;
5528
5529 local_irq_disable();
5530 cpu = smp_processor_id();
5531 sd = &per_cpu(softnet_data, cpu);
5532 oldsd = &per_cpu(softnet_data, oldcpu);
5533
5534 /* Find end of our completion_queue. */
5535 list_skb = &sd->completion_queue;
5536 while (*list_skb)
5537 list_skb = &(*list_skb)->next;
5538 /* Append completion queue from offline CPU. */
5539 *list_skb = oldsd->completion_queue;
5540 oldsd->completion_queue = NULL;
5541
5542 /* Find end of our output_queue. */
5543 list_net = &sd->output_queue;
5544 while (*list_net)
5545 list_net = &(*list_net)->next_sched;
5546 /* Append output queue from offline CPU. */
5547 *list_net = oldsd->output_queue;
5548 oldsd->output_queue = NULL;
5549
5550 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5551 local_irq_enable();
5552
5553 /* Process offline CPU's input_pkt_queue */
5554 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5555 netif_rx(skb);
5556
5557 return NOTIFY_OK;
5558}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005559
5560
Herbert Xu7f353bf2007-08-10 15:47:58 -07005561/**
Herbert Xub63365a2008-10-23 01:11:29 -07005562 * netdev_increment_features - increment feature set by one
5563 * @all: current feature set
5564 * @one: new feature set
5565 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005566 *
5567 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005568 * @one to the master device with current feature set @all. Will not
5569 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005570 */
Herbert Xub63365a2008-10-23 01:11:29 -07005571unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5572 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005573{
Herbert Xub63365a2008-10-23 01:11:29 -07005574 /* If device needs checksumming, downgrade to it. */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005575 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
Herbert Xub63365a2008-10-23 01:11:29 -07005576 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5577 else if (mask & NETIF_F_ALL_CSUM) {
5578 /* If one device supports v4/v6 checksumming, set for all. */
5579 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5580 !(all & NETIF_F_GEN_CSUM)) {
5581 all &= ~NETIF_F_ALL_CSUM;
5582 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5583 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005584
Herbert Xub63365a2008-10-23 01:11:29 -07005585 /* If one device supports hw checksumming, set for all. */
5586 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5587 all &= ~NETIF_F_ALL_CSUM;
5588 all |= NETIF_F_HW_CSUM;
5589 }
5590 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005591
Herbert Xub63365a2008-10-23 01:11:29 -07005592 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005593
Herbert Xub63365a2008-10-23 01:11:29 -07005594 one |= all & NETIF_F_ONE_FOR_ALL;
Sridhar Samudralad9f59502009-10-07 12:24:25 +00005595 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
Herbert Xub63365a2008-10-23 01:11:29 -07005596 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005597
5598 return all;
5599}
Herbert Xub63365a2008-10-23 01:11:29 -07005600EXPORT_SYMBOL(netdev_increment_features);
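/*
 * Illustrative sketch (not part of dev.c), loosely modelled on how a
 * master driver such as bonding folds its slaves' features together:
 * start from the most permissive checksum setting and fold in each slave
 * with netdev_increment_features().  The "slaves" array and the mask
 * chosen here are hypothetical.
 */
static unsigned long example_fold_features(struct net_device *slaves[],
					   int nr_slaves)
{
	unsigned long features = NETIF_F_NO_CSUM;
	unsigned long mask = NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_TSO;
	int i;

	for (i = 0; i < nr_slaves; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features,
						     mask);
	return features;
}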
Herbert Xu7f353bf2007-08-10 15:47:58 -07005601
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005602static struct hlist_head *netdev_create_hash(void)
5603{
5604 int i;
5605 struct hlist_head *hash;
5606
5607 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5608 if (hash != NULL)
5609 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5610 INIT_HLIST_HEAD(&hash[i]);
5611
5612 return hash;
5613}
5614
Eric W. Biederman881d9662007-09-17 11:56:21 -07005615/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005616static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005617{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005618 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005619
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005620 net->dev_name_head = netdev_create_hash();
5621 if (net->dev_name_head == NULL)
5622 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005623
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005624 net->dev_index_head = netdev_create_hash();
5625 if (net->dev_index_head == NULL)
5626 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005627
5628 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005629
5630err_idx:
5631 kfree(net->dev_name_head);
5632err_name:
5633 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005634}
5635
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005636/**
5637 * netdev_drivername - network driver for the device
5638 * @dev: network device
5639 * @buffer: buffer for resulting name
5640 * @len: size of buffer
5641 *
5642 * Determine network driver for device.
5643 */
Stephen Hemmingercf04a4c2008-09-30 02:22:14 -07005644char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005645{
Stephen Hemmingercf04a4c2008-09-30 02:22:14 -07005646 const struct device_driver *driver;
5647 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005648
5649 if (len <= 0 || !buffer)
5650 return buffer;
5651 buffer[0] = 0;
5652
5653 parent = dev->dev.parent;
5654
5655 if (!parent)
5656 return buffer;
5657
5658 driver = parent->driver;
5659 if (driver && driver->name)
5660 strlcpy(buffer, driver->name, len);
5661 return buffer;
5662}
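/*
 * Illustrative sketch (not part of dev.c): the classic consumer of
 * netdev_drivername() is diagnostic output such as the transmit watchdog,
 * which resolves the driver name into a small on-stack buffer.  The
 * surrounding function here is hypothetical.
 */
static void example_report_device(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_INFO "%s (%s): example diagnostic message\n",
	       dev->name,
	       netdev_drivername(dev, drivername, sizeof(drivername)));
}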
5663
Pavel Emelyanov46650792007-10-08 20:38:39 -07005664static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005665{
5666 kfree(net->dev_name_head);
5667 kfree(net->dev_index_head);
5668}
5669
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005670static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005671 .init = netdev_init,
5672 .exit = netdev_exit,
5673};
5674
Pavel Emelyanov46650792007-10-08 20:38:39 -07005675static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005676{
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005677 struct net_device *dev;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005678 /*
	5679	 * Push all of the migratable network devices back to the
	5680	 * initial network namespace
5681 */
5682 rtnl_lock();
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005683restart:
5684 for_each_netdev(net, dev) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005685 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005686 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005687
	5688	/* Ignore unmovable devices (e.g. loopback) */
5689 if (dev->features & NETIF_F_NETNS_LOCAL)
5690 continue;
5691
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005692 /* Delete virtual devices */
5693 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
Eric Dumazet23289a32009-10-27 07:06:36 +00005694 dev->rtnl_link_ops->dellink(dev, NULL);
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005695 goto restart;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005696 }
5697
Eric W. Biedermance286d32007-09-12 13:53:49 +02005698	/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005699 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5700 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005701 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005702 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005703 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005704 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005705 }
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005706 goto restart;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005707 }
5708 rtnl_unlock();
5709}
5710
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005711static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005712 .exit = default_device_exit,
5713};
5714
Linus Torvalds1da177e2005-04-16 15:20:36 -07005715/*
5716 * Initialize the DEV module. At boot time this walks the device list and
5717 * unhooks any devices that fail to initialise (normally hardware not
5718 * present) and leaves us with a valid list of present and active devices.
5719 *
5720 */
5721
5722/*
5723 * This is called single threaded during boot, so no need
5724 * to take the rtnl semaphore.
5725 */
5726static int __init net_dev_init(void)
5727{
5728 int i, rc = -ENOMEM;
5729
5730 BUG_ON(!dev_boot_phase);
5731
Linus Torvalds1da177e2005-04-16 15:20:36 -07005732 if (dev_proc_init())
5733 goto out;
5734
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005735 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005736 goto out;
5737
5738 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a862007-11-26 20:12:58 +08005739 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005740 INIT_LIST_HEAD(&ptype_base[i]);
5741
Eric W. Biederman881d9662007-09-17 11:56:21 -07005742 if (register_pernet_subsys(&netdev_net_ops))
5743 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005744
5745 /*
5746 * Initialise the packet receive queues.
5747 */
5748
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005749 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005750 struct softnet_data *queue;
5751
5752 queue = &per_cpu(softnet_data, i);
5753 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005754 queue->completion_queue = NULL;
5755 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005756
5757 queue->backlog.poll = process_backlog;
5758 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005759 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005760 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005761 }
5762
Linus Torvalds1da177e2005-04-16 15:20:36 -07005763 dev_boot_phase = 0;
5764
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005765	/* The loopback device is special: if any other network device
	5766	 * is present in a network namespace, the loopback device must
	5767	 * be present too. Since we now dynamically allocate and free the
	5768	 * loopback device, ensure this invariant is maintained by
	5769	 * keeping the loopback device as the first device on the
	5770	 * list of network devices, so that it is the first device
	5771	 * that appears and the last network device
	5772	 * that disappears.
5773 */
5774 if (register_pernet_device(&loopback_net_ops))
5775 goto out;
5776
5777 if (register_pernet_device(&default_device_ops))
5778 goto out;
5779
Carlos R. Mafra962cf362008-05-15 11:15:37 -03005780 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5781 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005782
5783 hotcpu_notifier(dev_cpu_callback, 0);
5784 dst_init();
5785 dev_mcast_init();
5786 rc = 0;
5787out:
5788 return rc;
5789}
5790
5791subsys_initcall(net_dev_init);
5792
Krishna Kumare88721f2009-02-18 17:55:02 -08005793static int __init initialize_hashrnd(void)
5794{
5795 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5796 return 0;
5797}
5798
5799late_initcall_sync(initialize_hashrnd);
5800