/*
 *	NET3	Protocol independent device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
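
/*
 * Usage sketch (illustrative only, not part of the original file): a protocol
 * module typically embeds a struct packet_type, fills in .type and .func, and
 * registers it with dev_add_pack() from its init path.  ETH_P_EXAMPLE and
 * example_rcv() below are hypothetical names.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		... process the skb, then free or queue it ...
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_packet_type __read_mostly = {
 *		.type	= cpu_to_be16(ETH_P_EXAMPLE),
 *		.func	= example_rcv,
 *	};
 *
 *	dev_add_pack(&example_packet_type);	(module init)
 *	dev_remove_pack(&example_packet_type);	(module exit)
 */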

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
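
/*
 * Usage sketch (illustrative): as parsed above, the "netdev=" boot parameter
 * takes up to four comma-separated integers (irq, base_addr, mem_start,
 * mem_end) followed by the interface name, for example:
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * The values are stashed in dev_boot_setup and picked up later by
 * netdev_boot_setup_check() during device probing.
 */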

/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
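
/*
 * Usage sketch (illustrative, not part of the original file): callers that
 * cannot hold RTNL or dev_base_lock use dev_get_by_name() and must drop the
 * reference with dev_put() when done; "eth0" is just an example name.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */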

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
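
/*
 * Usage sketch (illustrative): a driver that wants an automatically numbered
 * name passes a format string with a single %d; on success dev->name holds
 * the chosen name ("eth%d" may become "eth0", "eth1", ...).
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		return err;
 */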


/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	} else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device,
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
EXPORT_SYMBOL(dev_open);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare to death, when device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can even be on a different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
EXPORT_SYMBOL(dev_close);
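
/*
 * Usage sketch (illustrative): both dev_open() and dev_close() must run under
 * the RTNL semaphore, e.g. when bringing an interface up from in-kernel code:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */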


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
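
/*
 * Usage sketch (illustrative, hypothetical names): a subsystem interested in
 * device events registers a notifier_block whose callback receives the
 * NETDEV_* event and, in this kernel, the affected net_device as @ptr.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *		case NETDEV_DOWN:
 *			... react to dev coming up or going down ...
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_netdev_notifier = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_netdev_notifier);
 */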
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314
1315/**
1316 * call_netdevice_notifiers - call all network notifier blocks
1317 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001318 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 *
1320 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001321 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 */
1323
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001324int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001326 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327}
1328
1329/* When > 0 there are consumers of rx skb time stamps */
1330static atomic_t netstamp_needed = ATOMIC_INIT(0);
1331
1332void net_enable_timestamp(void)
1333{
1334 atomic_inc(&netstamp_needed);
1335}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001336EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
1338void net_disable_timestamp(void)
1339{
1340 atomic_dec(&netstamp_needed);
1341}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001342EXPORT_SYMBOL(net_disable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001344static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345{
1346 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001347 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001348 else
1349 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350}
1351
1352/*
1353 * Support routine. Sends outgoing frames to any network
1354 * taps currently in use.
1355 */
1356
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001357static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358{
1359 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001360
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001361#ifdef CONFIG_NET_CLS_ACT
1362 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1363 net_timestamp(skb);
1364#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001365 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001366#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367
1368 rcu_read_lock();
1369 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1370 /* Never send packets back to the socket
1371 * they originated from - MvS (miquels@drinkel.ow.org)
1372 */
1373 if ((ptype->dev == dev || !ptype->dev) &&
1374 (ptype->af_packet_priv == NULL ||
1375 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001376 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 if (!skb2)
1378 break;
1379
1380 /* skb->nh should be correctly
 1381 set by the sender, so that the second statement is
1382 just protection against buggy protocols.
1383 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001384 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001386 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001387 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 if (net_ratelimit())
1389 printk(KERN_CRIT "protocol %04x is "
1390 "buggy, dev %s\n",
1391 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001392 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 }
1394
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001395 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001397 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398 }
1399 }
1400 rcu_read_unlock();
1401}
1402
Denis Vlasenko56079432006-03-29 15:57:29 -08001403
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001404static inline void __netif_reschedule(struct Qdisc *q)
1405{
1406 struct softnet_data *sd;
1407 unsigned long flags;
1408
1409 local_irq_save(flags);
1410 sd = &__get_cpu_var(softnet_data);
1411 q->next_sched = sd->output_queue;
1412 sd->output_queue = q;
1413 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1414 local_irq_restore(flags);
1415}
1416
David S. Miller37437bb2008-07-16 02:15:04 -07001417void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001418{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001419 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1420 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001421}
1422EXPORT_SYMBOL(__netif_schedule);
1423
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001424void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001425{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001426 if (atomic_dec_and_test(&skb->users)) {
1427 struct softnet_data *sd;
1428 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001429
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001430 local_irq_save(flags);
1431 sd = &__get_cpu_var(softnet_data);
1432 skb->next = sd->completion_queue;
1433 sd->completion_queue = skb;
1434 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1435 local_irq_restore(flags);
1436 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001437}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001438EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001439
1440void dev_kfree_skb_any(struct sk_buff *skb)
1441{
1442 if (in_irq() || irqs_disabled())
1443 dev_kfree_skb_irq(skb);
1444 else
1445 dev_kfree_skb(skb);
1446}
1447EXPORT_SYMBOL(dev_kfree_skb_any);
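/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a TX-completion handler that may run in hard-IRQ context frees the
 * transmitted skb with dev_kfree_skb_any(), which defers the free to the
 * per-cpu completion queue above when it cannot free directly.
 */
static void example_tx_complete(struct net_device *dev, struct sk_buff *skb)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb_any(skb);		/* safe in IRQ and process context */
}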
1448
1449
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001450/**
1451 * netif_device_detach - mark device as removed
1452 * @dev: network device
1453 *
 1454 * Mark the device as removed from the system and therefore no longer available.
1455 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001456void netif_device_detach(struct net_device *dev)
1457{
1458 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1459 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001460 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001461 }
1462}
1463EXPORT_SYMBOL(netif_device_detach);
1464
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001465/**
1466 * netif_device_attach - mark device as attached
1467 * @dev: network device
1468 *
 1469 * Mark the device as attached to the system and restart it if needed.
1470 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001471void netif_device_attach(struct net_device *dev)
1472{
1473 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1474 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001475 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001476 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001477 }
1478}
1479EXPORT_SYMBOL(netif_device_attach);
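/*
 * Illustrative sketch (hypothetical PCI driver, not part of this file;
 * assumes <linux/pci.h>): the usual suspend/resume pairing around the
 * detach/attach helpers above.
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_detach(netdev);	/* stops all TX queues if running */
	/* ... quiesce and power down the hardware ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	/* ... power up and reprogram the hardware ... */
	netif_device_attach(netdev);	/* restarts queues and the watchdog */
	return 0;
}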
1480
Ben Hutchings6de329e2008-06-16 17:02:28 -07001481static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1482{
1483 return ((features & NETIF_F_GEN_CSUM) ||
1484 ((features & NETIF_F_IP_CSUM) &&
1485 protocol == htons(ETH_P_IP)) ||
1486 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001487 protocol == htons(ETH_P_IPV6)) ||
1488 ((features & NETIF_F_FCOE_CRC) &&
1489 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001490}
1491
1492static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1493{
1494 if (can_checksum_protocol(dev->features, skb->protocol))
1495 return true;
1496
1497 if (skb->protocol == htons(ETH_P_8021Q)) {
1498 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1499 if (can_checksum_protocol(dev->features & dev->vlan_features,
1500 veh->h_vlan_encapsulated_proto))
1501 return true;
1502 }
1503
1504 return false;
1505}
Denis Vlasenko56079432006-03-29 15:57:29 -08001506
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507/*
1508 * Invalidate hardware checksum when packet is to be mangled, and
1509 * complete checksum manually on outgoing path.
1510 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001511int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512{
Al Virod3bc23e2006-11-14 21:24:49 -08001513 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001514 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515
Patrick McHardy84fa7932006-08-29 16:44:56 -07001516 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001517 goto out_set_summed;
1518
1519 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001520 /* Let GSO fix up the checksum. */
1521 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 }
1523
Herbert Xua0308472007-10-15 01:47:15 -07001524 offset = skb->csum_start - skb_headroom(skb);
1525 BUG_ON(offset >= skb_headlen(skb));
1526 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1527
1528 offset += skb->csum_offset;
1529 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1530
1531 if (skb_cloned(skb) &&
1532 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1534 if (ret)
1535 goto out;
1536 }
1537
Herbert Xua0308472007-10-15 01:47:15 -07001538 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001539out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001541out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 return ret;
1543}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001544EXPORT_SYMBOL(skb_checksum_help);
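/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver whose hardware cannot checksum a particular frame falls back
 * to skb_checksum_help() before programming its descriptors; on failure
 * the caller is expected to drop the skb.
 */
static int example_prep_checksum(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
		return -EIO;
	return 0;
}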
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001546/**
1547 * skb_gso_segment - Perform segmentation on skb.
1548 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001549 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001550 *
1551 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001552 *
1553 * It may return NULL if the skb requires no segmentation. This is
1554 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001555 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001556struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001557{
1558 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1559 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001560 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001561 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001562
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001563 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001564 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001565 __skb_pull(skb, skb->mac_len);
1566
Herbert Xu67fd1a72009-01-19 16:26:44 -08001567 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1568 struct net_device *dev = skb->dev;
1569 struct ethtool_drvinfo info = {};
1570
1571 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1572 dev->ethtool_ops->get_drvinfo(dev, &info);
1573
1574 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1575 "ip_summed=%d",
1576 info.driver, dev ? dev->features : 0L,
1577 skb->sk ? skb->sk->sk_route_caps : 0L,
1578 skb->len, skb->data_len, skb->ip_summed);
1579
Herbert Xua430a432006-07-08 13:34:56 -07001580 if (skb_header_cloned(skb) &&
1581 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1582 return ERR_PTR(err);
1583 }
1584
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001585 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001586 list_for_each_entry_rcu(ptype,
1587 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001588 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001589 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001590 err = ptype->gso_send_check(skb);
1591 segs = ERR_PTR(err);
1592 if (err || skb_gso_ok(skb, features))
1593 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001594 __skb_push(skb, (skb->data -
1595 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001596 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001597 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001598 break;
1599 }
1600 }
1601 rcu_read_unlock();
1602
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001603 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001604
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001605 return segs;
1606}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001607EXPORT_SYMBOL(skb_gso_segment);
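/*
 * Illustrative sketch (not part of this file): walking the list returned
 * by skb_gso_segment(), roughly what dev_gso_segment() below arranges for
 * devices that lack the needed hardware features.  example_xmit_one() is
 * a made-up stand-in for a driver's real transmit step.
 */
static void example_xmit_one(struct sk_buff *nskb)
{
	dev_kfree_skb_any(nskb);	/* a real driver would DMA it out */
}

static int example_software_gso(struct sk_buff *skb, int features)
{
	struct sk_buff *segs, *nskb;

	segs = skb_gso_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)			/* header check only; send skb as-is */
		return 0;

	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		example_xmit_one(nskb);
	}
	kfree_skb(skb);			/* segments carry their own references */
	return 0;
}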
1608
Herbert Xufb286bb2005-11-10 13:01:24 -08001609/* Take action when hardware reception checksum errors are detected. */
1610#ifdef CONFIG_BUG
1611void netdev_rx_csum_fault(struct net_device *dev)
1612{
1613 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001614 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001615 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001616 dump_stack();
1617 }
1618}
1619EXPORT_SYMBOL(netdev_rx_csum_fault);
1620#endif
1621
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622/* Actually, we should eliminate this check as soon as we know that:
 1623 * 1. An IOMMU is present and allows mapping all the memory.
1624 * 2. No high memory really exists on this machine.
1625 */
1626
1627static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1628{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001629#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630 int i;
1631
1632 if (dev->features & NETIF_F_HIGHDMA)
1633 return 0;
1634
1635 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1636 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1637 return 1;
1638
Herbert Xu3d3a8532006-06-27 13:33:10 -07001639#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 return 0;
1641}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001643struct dev_gso_cb {
1644 void (*destructor)(struct sk_buff *skb);
1645};
1646
1647#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1648
1649static void dev_gso_skb_destructor(struct sk_buff *skb)
1650{
1651 struct dev_gso_cb *cb;
1652
1653 do {
1654 struct sk_buff *nskb = skb->next;
1655
1656 skb->next = nskb->next;
1657 nskb->next = NULL;
1658 kfree_skb(nskb);
1659 } while (skb->next);
1660
1661 cb = DEV_GSO_CB(skb);
1662 if (cb->destructor)
1663 cb->destructor(skb);
1664}
1665
1666/**
1667 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1668 * @skb: buffer to segment
1669 *
1670 * This function segments the given skb and stores the list of segments
1671 * in skb->next.
1672 */
1673static int dev_gso_segment(struct sk_buff *skb)
1674{
1675 struct net_device *dev = skb->dev;
1676 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001677 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1678 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001679
Herbert Xu576a30e2006-06-27 13:22:38 -07001680 segs = skb_gso_segment(skb, features);
1681
1682 /* Verifying header integrity only. */
1683 if (!segs)
1684 return 0;
1685
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001686 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001687 return PTR_ERR(segs);
1688
1689 skb->next = segs;
1690 DEV_GSO_CB(skb)->destructor = skb->destructor;
1691 skb->destructor = dev_gso_skb_destructor;
1692
1693 return 0;
1694}
1695
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001696int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1697 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001698{
Stephen Hemminger00829822008-11-20 20:14:53 -08001699 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001700 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001701
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001702 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001703 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001704 dev_queue_xmit_nit(skb, dev);
1705
Herbert Xu576a30e2006-06-27 13:22:38 -07001706 if (netif_needs_gso(dev, skb)) {
1707 if (unlikely(dev_gso_segment(skb)))
1708 goto out_kfree_skb;
1709 if (skb->next)
1710 goto gso;
1711 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001712
Eric Dumazet93f154b2009-05-18 22:19:19 -07001713 /*
 1714 * If the device doesn't need skb->dst, release it right now while
 1715 * it's hot in this CPU's cache.
1716 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001717 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1718 skb_dst_drop(skb);
1719
Patrick Ohlyac45f602009-02-12 05:03:37 +00001720 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001721 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001722 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001723 /*
1724 * TODO: if skb_orphan() was called by
1725 * dev->hard_start_xmit() (for example, the unmodified
1726 * igb driver does that; bnx2 doesn't), then
1727 * skb_tx_software_timestamp() will be unable to send
1728 * back the time stamp.
1729 *
1730 * How can this be prevented? Always create another
1731 * reference to the socket before calling
1732 * dev->hard_start_xmit()? Prevent that skb_orphan()
1733 * does anything in dev->hard_start_xmit() by clearing
1734 * the skb destructor before the call and restoring it
1735 * afterwards, then doing the skb_orphan() ourselves?
1736 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001737 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001738 }
1739
Herbert Xu576a30e2006-06-27 13:22:38 -07001740gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001741 do {
1742 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001743
1744 skb->next = nskb->next;
1745 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001746 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001747 if (unlikely(rc != NETDEV_TX_OK)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001748 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001749 skb->next = nskb;
1750 return rc;
1751 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001752 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001753 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001754 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001755 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001756
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001757 skb->destructor = DEV_GSO_CB(skb)->destructor;
1758
1759out_kfree_skb:
1760 kfree_skb(skb);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001761 return NETDEV_TX_OK;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001762}
1763
David S. Miller70192982009-01-27 16:34:47 -08001764static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001765
Stephen Hemminger92477442009-03-21 13:39:26 -07001766u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001767{
David S. Miller70192982009-01-27 16:34:47 -08001768 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001769
David S. Miller513de112009-05-03 14:43:10 -07001770 if (skb_rx_queue_recorded(skb)) {
1771 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001772 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001773 hash -= dev->real_num_tx_queues;
1774 return hash;
1775 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001776
1777 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001778 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001779 else
David S. Miller70192982009-01-27 16:34:47 -08001780 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001781
David S. Miller70192982009-01-27 16:34:47 -08001782 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001783
David S. Millerb6b2fed2008-07-21 09:48:06 -07001784 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001785}
Stephen Hemminger92477442009-03-21 13:39:26 -07001786EXPORT_SYMBOL(skb_tx_hash);
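/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a multiqueue driver with no special steering needs can simply reuse
 * skb_tx_hash() from its ndo_select_queue hook.
 */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);
}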
David S. Miller8f0f2222008-07-15 03:47:03 -07001787
David S. Millere8a04642008-07-17 00:34:19 -07001788static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1789 struct sk_buff *skb)
1790{
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001791 u16 queue_index;
1792 struct sock *sk = skb->sk;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001793
Krishna Kumara4ee3ce2009-10-19 23:50:07 +00001794 if (sk_tx_queue_recorded(sk)) {
1795 queue_index = sk_tx_queue_get(sk);
1796 } else {
1797 const struct net_device_ops *ops = dev->netdev_ops;
1798
1799 if (ops->ndo_select_queue) {
1800 queue_index = ops->ndo_select_queue(dev, skb);
1801 } else {
1802 queue_index = 0;
1803 if (dev->real_num_tx_queues > 1)
1804 queue_index = skb_tx_hash(dev, skb);
1805
1806 if (sk && sk->sk_dst_cache)
1807 sk_tx_queue_set(sk, queue_index);
1808 }
1809 }
David S. Millereae792b2008-07-15 03:03:33 -07001810
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001811 skb_set_queue_mapping(skb, queue_index);
1812 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001813}
1814
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001815static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1816 struct net_device *dev,
1817 struct netdev_queue *txq)
1818{
1819 spinlock_t *root_lock = qdisc_lock(q);
1820 int rc;
1821
1822 spin_lock(root_lock);
1823 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1824 kfree_skb(skb);
1825 rc = NET_XMIT_DROP;
1826 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1827 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1828 /*
1829 * This is a work-conserving queue; there are no old skbs
1830 * waiting to be sent out; and the qdisc is not running -
1831 * xmit the skb directly.
1832 */
1833 __qdisc_update_bstats(q, skb->len);
1834 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1835 __qdisc_run(q);
1836 else
1837 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1838
1839 rc = NET_XMIT_SUCCESS;
1840 } else {
1841 rc = qdisc_enqueue_root(skb, q);
1842 qdisc_run(q);
1843 }
1844 spin_unlock(root_lock);
1845
1846 return rc;
1847}
1848
Dave Jonesd29f7492008-07-22 14:09:06 -07001849/**
1850 * dev_queue_xmit - transmit a buffer
1851 * @skb: buffer to transmit
1852 *
1853 * Queue a buffer for transmission to a network device. The caller must
1854 * have set the device and priority and built the buffer before calling
1855 * this function. The function can be called from an interrupt.
1856 *
1857 * A negative errno code is returned on a failure. A success does not
1858 * guarantee the frame will be transmitted as it may be dropped due
1859 * to congestion or traffic shaping.
1860 *
1861 * -----------------------------------------------------------------------------------
1862 * I notice this method can also return errors from the queue disciplines,
1863 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1864 * be positive.
1865 *
1866 * Regardless of the return value, the skb is consumed, so it is currently
1867 * difficult to retry a send to this method. (You can bump the ref count
1868 * before sending to hold a reference for retry if you are careful.)
1869 *
1870 * When calling this method, interrupts MUST be enabled. This is because
1871 * the BH enable code must have IRQs enabled so that it will not deadlock.
1872 * --BLG
1873 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874int dev_queue_xmit(struct sk_buff *skb)
1875{
1876 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001877 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 struct Qdisc *q;
1879 int rc = -ENOMEM;
1880
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001881 /* GSO will handle the following emulations directly. */
1882 if (netif_needs_gso(dev, skb))
1883 goto gso;
1884
David S. Miller4cf704f2009-06-09 00:18:51 -07001885 if (skb_has_frags(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001887 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 goto out_kfree_skb;
1889
1890 /* Fragmented skb is linearized if device does not support SG,
 1891 * or if at least one of the fragments is in highmem and the device
1892 * does not support DMA from it.
1893 */
1894 if (skb_shinfo(skb)->nr_frags &&
1895 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001896 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 goto out_kfree_skb;
1898
1899 /* If packet is not checksummed and device does not support
1900 * checksumming for this protocol, complete checksumming here.
1901 */
Herbert Xu663ead32007-04-09 11:59:07 -07001902 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1903 skb_set_transport_header(skb, skb->csum_start -
1904 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001905 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1906 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001907 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001909gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001910 /* Disable soft irqs for various locks below. Also
1911 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001913 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914
David S. Millereae792b2008-07-15 03:03:33 -07001915 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001916 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001917
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001919 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920#endif
1921 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001922 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07001923 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 }
1925
1926 /* The device has no queue. Common case for software devices:
 1927 loopback, all sorts of tunnels...
 1928
Herbert Xu932ff272006-06-09 12:20:56 -07001929 Really, it is unlikely that netif_tx_lock protection is necessary
 1930 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 counters.)
 1932 However, it is possible that they rely on the protection
 1933 we provide here.
 1934
 1935 Check this and shoot the lock. It is not prone to deadlocks.
 1936 Either way, shooting the noqueue qdisc is even simpler 8)
1937 */
1938 if (dev->flags & IFF_UP) {
1939 int cpu = smp_processor_id(); /* ok because BHs are off */
1940
David S. Millerc773e842008-07-08 23:13:53 -07001941 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
David S. Millerc773e842008-07-08 23:13:53 -07001943 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001945 if (!netif_tx_queue_stopped(txq)) {
Krishna Kumar03a9a442009-08-29 20:21:36 +00001946 rc = NET_XMIT_SUCCESS;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001947 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001948 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 goto out;
1950 }
1951 }
David S. Millerc773e842008-07-08 23:13:53 -07001952 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 if (net_ratelimit())
1954 printk(KERN_CRIT "Virtual device %s asks to "
1955 "queue packet!\n", dev->name);
1956 } else {
1957 /* Recursion is detected! It is possible,
1958 * unfortunately */
1959 if (net_ratelimit())
1960 printk(KERN_CRIT "Dead loop on virtual device "
1961 "%s, fix it urgently!\n", dev->name);
1962 }
1963 }
1964
1965 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001966 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
1968out_kfree_skb:
1969 kfree_skb(skb);
1970 return rc;
1971out:
Herbert Xud4828d82006-06-22 02:28:18 -07001972 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 return rc;
1974}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001975EXPORT_SYMBOL(dev_queue_xmit);
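/*
 * Illustrative sketch (not part of this file): a module that has built an
 * skb with skb->dev and the link-layer header already filled in hands it
 * to the stack like this.  dev_queue_xmit() consumes the skb even on
 * error, so the caller must not free it again.
 */
static void example_send(struct sk_buff *skb)
{
	int rc = dev_queue_xmit(skb);

	if (rc != NET_XMIT_SUCCESS && net_ratelimit())
		printk(KERN_DEBUG "example: dev_queue_xmit returned %d\n", rc);
}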
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
1977
1978/*=======================================================================
1979 Receiver routines
1980 =======================================================================*/
1981
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001982int netdev_max_backlog __read_mostly = 1000;
1983int netdev_budget __read_mostly = 300;
1984int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985
1986DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1987
1988
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989/**
1990 * netif_rx - post buffer to the network code
1991 * @skb: buffer to post
1992 *
1993 * This function receives a packet from a device driver and queues it for
1994 * the upper (protocol) levels to process. It always succeeds. The buffer
1995 * may be dropped during processing for congestion control or by the
1996 * protocol layers.
1997 *
1998 * return values:
1999 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 * NET_RX_DROP (packet was dropped)
2001 *
2002 */
2003
2004int netif_rx(struct sk_buff *skb)
2005{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 struct softnet_data *queue;
2007 unsigned long flags;
2008
2009 /* if netpoll wants it, pretend we never saw it */
2010 if (netpoll_rx(skb))
2011 return NET_RX_DROP;
2012
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002013 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002014 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015
2016 /*
 2017 * The code is rearranged so that the path is shortest
 2018 * when the CPU is congested but still operating.
2019 */
2020 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 queue = &__get_cpu_var(softnet_data);
2022
2023 __get_cpu_var(netdev_rx_stat).total++;
2024 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2025 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07002029 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 }
2031
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002032 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 goto enqueue;
2034 }
2035
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 __get_cpu_var(netdev_rx_stat).dropped++;
2037 local_irq_restore(flags);
2038
2039 kfree_skb(skb);
2040 return NET_RX_DROP;
2041}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002042EXPORT_SYMBOL(netif_rx);
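/*
 * Illustrative sketch (hypothetical non-NAPI driver, not part of this
 * file): a receive interrupt handler copies the frame out of the device,
 * sets the protocol and queues it with netif_rx().
 */
static void example_rx_frame(struct net_device *dev, const void *data,
			     unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb)
		return;				/* frame is dropped */
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}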
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043
2044int netif_rx_ni(struct sk_buff *skb)
2045{
2046 int err;
2047
2048 preempt_disable();
2049 err = netif_rx(skb);
2050 if (local_softirq_pending())
2051 do_softirq();
2052 preempt_enable();
2053
2054 return err;
2055}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056EXPORT_SYMBOL(netif_rx_ni);
2057
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058static void net_tx_action(struct softirq_action *h)
2059{
2060 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2061
2062 if (sd->completion_queue) {
2063 struct sk_buff *clist;
2064
2065 local_irq_disable();
2066 clist = sd->completion_queue;
2067 sd->completion_queue = NULL;
2068 local_irq_enable();
2069
2070 while (clist) {
2071 struct sk_buff *skb = clist;
2072 clist = clist->next;
2073
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002074 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 __kfree_skb(skb);
2076 }
2077 }
2078
2079 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002080 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081
2082 local_irq_disable();
2083 head = sd->output_queue;
2084 sd->output_queue = NULL;
2085 local_irq_enable();
2086
2087 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002088 struct Qdisc *q = head;
2089 spinlock_t *root_lock;
2090
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 head = head->next_sched;
2092
David S. Miller5fb66222008-08-02 20:02:43 -07002093 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002094 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002095 smp_mb__before_clear_bit();
2096 clear_bit(__QDISC_STATE_SCHED,
2097 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002098 qdisc_run(q);
2099 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002101 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002102 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002103 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002104 } else {
2105 smp_mb__before_clear_bit();
2106 clear_bit(__QDISC_STATE_SCHED,
2107 &q->state);
2108 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 }
2110 }
2111 }
2112}
2113
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002114static inline int deliver_skb(struct sk_buff *skb,
2115 struct packet_type *pt_prev,
2116 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117{
2118 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002119 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120}
2121
2122#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002123
2124#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2125/* This hook is defined here for ATM LANE */
2126int (*br_fdb_test_addr_hook)(struct net_device *dev,
2127 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002128EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002129#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
Stephen Hemminger6229e362007-03-21 13:38:47 -07002131/*
 2132 * If the bridge module is loaded, call the bridging hook.
 2133 * Returns NULL if the packet was consumed.
2134 */
2135struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2136 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002137EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002138
Stephen Hemminger6229e362007-03-21 13:38:47 -07002139static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2140 struct packet_type **pt_prev, int *ret,
2141 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142{
2143 struct net_bridge_port *port;
2144
Stephen Hemminger6229e362007-03-21 13:38:47 -07002145 if (skb->pkt_type == PACKET_LOOPBACK ||
2146 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2147 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148
2149 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002150 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002152 }
2153
Stephen Hemminger6229e362007-03-21 13:38:47 -07002154 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155}
2156#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002157#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158#endif
2159
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002160#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2161struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2162EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2163
2164static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2165 struct packet_type **pt_prev,
2166 int *ret,
2167 struct net_device *orig_dev)
2168{
2169 if (skb->dev->macvlan_port == NULL)
2170 return skb;
2171
2172 if (*pt_prev) {
2173 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2174 *pt_prev = NULL;
2175 }
2176 return macvlan_handle_frame_hook(skb);
2177}
2178#else
2179#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2180#endif
2181
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182#ifdef CONFIG_NET_CLS_ACT
 2183/* TODO: Maybe we should just force sch_ingress to be compiled in
 2184 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
 2185 * instructions (a compare and 2 extra stores) right now if we don't
 2186 * have it on but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002187 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 * the ingress scheduler, you just can't add policies on ingress.
2189 *
2190 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002191static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002194 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002195 struct netdev_queue *rxq;
2196 int result = TC_ACT_OK;
2197 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002198
Herbert Xuf697c3e2007-10-14 00:38:47 -07002199 if (MAX_RED_LOOP < ttl++) {
2200 printk(KERN_WARNING
2201 "Redir loop detected Dropping packet (%d->%d)\n",
2202 skb->iif, dev->ifindex);
2203 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 }
2205
Herbert Xuf697c3e2007-10-14 00:38:47 -07002206 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2207 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2208
David S. Miller555353c2008-07-08 17:33:13 -07002209 rxq = &dev->rx_queue;
2210
David S. Miller83874002008-07-17 00:53:03 -07002211 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002212 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002213 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002214 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2215 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002216 spin_unlock(qdisc_lock(q));
2217 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002218
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219 return result;
2220}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002221
2222static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2223 struct packet_type **pt_prev,
2224 int *ret, struct net_device *orig_dev)
2225{
David S. Miller8d50b532008-07-30 02:37:46 -07002226 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002227 goto out;
2228
2229 if (*pt_prev) {
2230 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2231 *pt_prev = NULL;
2232 } else {
2233 /* Huh? Why does turning on AF_PACKET affect this? */
2234 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2235 }
2236
2237 switch (ing_filter(skb)) {
2238 case TC_ACT_SHOT:
2239 case TC_ACT_STOLEN:
2240 kfree_skb(skb);
2241 return NULL;
2242 }
2243
2244out:
2245 skb->tc_verd = 0;
2246 return skb;
2247}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248#endif
2249
Patrick McHardybc1d0412008-07-14 22:49:30 -07002250/*
2251 * netif_nit_deliver - deliver received packets to network taps
2252 * @skb: buffer
2253 *
2254 * This function is used to deliver incoming packets to network
2255 * taps. It should be used when the normal netif_receive_skb path
2256 * is bypassed, for example because of VLAN acceleration.
2257 */
2258void netif_nit_deliver(struct sk_buff *skb)
2259{
2260 struct packet_type *ptype;
2261
2262 if (list_empty(&ptype_all))
2263 return;
2264
2265 skb_reset_network_header(skb);
2266 skb_reset_transport_header(skb);
2267 skb->mac_len = skb->network_header - skb->mac_header;
2268
2269 rcu_read_lock();
2270 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2271 if (!ptype->dev || ptype->dev == skb->dev)
2272 deliver_skb(skb, ptype, skb->dev);
2273 }
2274 rcu_read_unlock();
2275}
2276
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002277/**
2278 * netif_receive_skb - process receive buffer from network
2279 * @skb: buffer to process
2280 *
2281 * netif_receive_skb() is the main receive data processing function.
2282 * It always succeeds. The buffer may be dropped during processing
2283 * for congestion control or by the protocol layers.
2284 *
2285 * This function may only be called from softirq context and interrupts
2286 * should be enabled.
2287 *
2288 * Return values (usually ignored):
2289 * NET_RX_SUCCESS: no congestion
2290 * NET_RX_DROP: packet was dropped
2291 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292int netif_receive_skb(struct sk_buff *skb)
2293{
2294 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002295 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002296 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002298 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002300 if (!skb->tstamp.tv64)
2301 net_timestamp(skb);
2302
Eric Dumazet05423b22009-10-26 18:40:35 -07002303 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002304 return NET_RX_SUCCESS;
2305
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002307 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 return NET_RX_DROP;
2309
Patrick McHardyc01003c2007-03-29 11:46:52 -07002310 if (!skb->iif)
2311 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002312
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002313 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002314 orig_dev = skb->dev;
2315 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002316 if (skb_bond_should_drop(skb))
2317 null_or_orig = orig_dev; /* deliver only exact match */
2318 else
2319 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002320 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002321
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 __get_cpu_var(netdev_rx_stat).total++;
2323
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002324 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002325 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002326 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327
2328 pt_prev = NULL;
2329
2330 rcu_read_lock();
2331
2332#ifdef CONFIG_NET_CLS_ACT
2333 if (skb->tc_verd & TC_NCLS) {
2334 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2335 goto ncls;
2336 }
2337#endif
2338
2339 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002340 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2341 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002342 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002343 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344 pt_prev = ptype;
2345 }
2346 }
2347
2348#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002349 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2350 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352ncls:
2353#endif
2354
Stephen Hemminger6229e362007-03-21 13:38:47 -07002355 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2356 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002358 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2359 if (!skb)
2360 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361
2362 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002363 list_for_each_entry_rcu(ptype,
2364 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002366 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2367 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002368 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002369 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002370 pt_prev = ptype;
2371 }
2372 }
2373
2374 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002375 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376 } else {
2377 kfree_skb(skb);
 2378 /* Jamal, now you will not be able to escape explaining
 2379 * to me how you were going to use this. :-)
2380 */
2381 ret = NET_RX_DROP;
2382 }
2383
2384out:
2385 rcu_read_unlock();
2386 return ret;
2387}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002388EXPORT_SYMBOL(netif_receive_skb);
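/*
 * Illustrative sketch (hypothetical NAPI driver code, not part of this
 * file): a driver already running in softirq context (typically from its
 * poll routine) delivers frames directly instead of going via netif_rx().
 */
static void example_deliver(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, napi->dev);
	netif_receive_skb(skb);		/* return value is usually ignored */
}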
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002390/* Network device is going away, flush any packets still pending */
2391static void flush_backlog(void *arg)
2392{
2393 struct net_device *dev = arg;
2394 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2395 struct sk_buff *skb, *tmp;
2396
2397 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2398 if (skb->dev == dev) {
2399 __skb_unlink(skb, &queue->input_pkt_queue);
2400 kfree_skb(skb);
2401 }
2402}
2403
Herbert Xud565b0a2008-12-15 23:38:52 -08002404static int napi_gro_complete(struct sk_buff *skb)
2405{
2406 struct packet_type *ptype;
2407 __be16 type = skb->protocol;
2408 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2409 int err = -ENOENT;
2410
Herbert Xufc59f9a2009-04-14 15:11:06 -07002411 if (NAPI_GRO_CB(skb)->count == 1) {
2412 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002413 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002414 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002415
2416 rcu_read_lock();
2417 list_for_each_entry_rcu(ptype, head, list) {
2418 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2419 continue;
2420
2421 err = ptype->gro_complete(skb);
2422 break;
2423 }
2424 rcu_read_unlock();
2425
2426 if (err) {
2427 WARN_ON(&ptype->list == head);
2428 kfree_skb(skb);
2429 return NET_RX_SUCCESS;
2430 }
2431
2432out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002433 return netif_receive_skb(skb);
2434}
2435
2436void napi_gro_flush(struct napi_struct *napi)
2437{
2438 struct sk_buff *skb, *next;
2439
2440 for (skb = napi->gro_list; skb; skb = next) {
2441 next = skb->next;
2442 skb->next = NULL;
2443 napi_gro_complete(skb);
2444 }
2445
Herbert Xu4ae55442009-02-08 18:00:36 +00002446 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002447 napi->gro_list = NULL;
2448}
2449EXPORT_SYMBOL(napi_gro_flush);
2450
Herbert Xu96e93ea2009-01-06 10:49:34 -08002451int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002452{
2453 struct sk_buff **pp = NULL;
2454 struct packet_type *ptype;
2455 __be16 type = skb->protocol;
2456 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002457 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002458 int mac_len;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002459 int ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002460
2461 if (!(skb->dev->features & NETIF_F_GRO))
2462 goto normal;
2463
David S. Miller4cf704f2009-06-09 00:18:51 -07002464 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002465 goto normal;
2466
Herbert Xud565b0a2008-12-15 23:38:52 -08002467 rcu_read_lock();
2468 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002469 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2470 continue;
2471
Herbert Xu86911732009-01-29 14:19:50 +00002472 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002473 mac_len = skb->network_header - skb->mac_header;
2474 skb->mac_len = mac_len;
2475 NAPI_GRO_CB(skb)->same_flow = 0;
2476 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002477 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002478
Herbert Xud565b0a2008-12-15 23:38:52 -08002479 pp = ptype->gro_receive(&napi->gro_list, skb);
2480 break;
2481 }
2482 rcu_read_unlock();
2483
2484 if (&ptype->list == head)
2485 goto normal;
2486
Herbert Xu0da2afd52008-12-26 14:57:42 -08002487 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002488 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002489
Herbert Xud565b0a2008-12-15 23:38:52 -08002490 if (pp) {
2491 struct sk_buff *nskb = *pp;
2492
2493 *pp = nskb->next;
2494 nskb->next = NULL;
2495 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002496 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002497 }
2498
Herbert Xu0da2afd52008-12-26 14:57:42 -08002499 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002500 goto ok;
2501
Herbert Xu4ae55442009-02-08 18:00:36 +00002502 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002503 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002504
Herbert Xu4ae55442009-02-08 18:00:36 +00002505 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002506 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002507 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002508 skb->next = napi->gro_list;
2509 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002510 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002511
Herbert Xuad0f9902009-02-01 01:24:55 -08002512pull:
Herbert Xucb189782009-05-26 18:50:31 +00002513 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2514 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2515
2516 BUG_ON(skb->end - skb->tail < grow);
2517
2518 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2519
2520 skb->tail += grow;
2521 skb->data_len -= grow;
2522
2523 skb_shinfo(skb)->frags[0].page_offset += grow;
2524 skb_shinfo(skb)->frags[0].size -= grow;
2525
2526 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2527 put_page(skb_shinfo(skb)->frags[0].page);
2528 memmove(skb_shinfo(skb)->frags,
2529 skb_shinfo(skb)->frags + 1,
2530 --skb_shinfo(skb)->nr_frags);
2531 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002532 }
2533
Herbert Xud565b0a2008-12-15 23:38:52 -08002534ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002535 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002536
2537normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002538 ret = GRO_NORMAL;
2539 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002540}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002541EXPORT_SYMBOL(dev_gro_receive);
2542
2543static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2544{
2545 struct sk_buff *p;
2546
Herbert Xud1c76af2009-03-16 10:50:02 -07002547 if (netpoll_rx_on(skb))
2548 return GRO_NORMAL;
2549
Herbert Xu96e93ea2009-01-06 10:49:34 -08002550 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002551 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2552 && !compare_ether_header(skb_mac_header(p),
2553 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002554 NAPI_GRO_CB(p)->flush = 0;
2555 }
2556
2557 return dev_gro_receive(napi, skb);
2558}
Herbert Xu5d38a072009-01-04 16:13:40 -08002559
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002560int napi_skb_finish(int ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002561{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002562 int err = NET_RX_SUCCESS;
2563
2564 switch (ret) {
2565 case GRO_NORMAL:
Herbert Xu5d38a072009-01-04 16:13:40 -08002566 return netif_receive_skb(skb);
2567
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002568 case GRO_DROP:
2569 err = NET_RX_DROP;
2570 /* fall through */
2571
2572 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002573 kfree_skb(skb);
2574 break;
2575 }
2576
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002577 return err;
2578}
2579EXPORT_SYMBOL(napi_skb_finish);
2580
Herbert Xu78a478d2009-05-26 18:50:21 +00002581void skb_gro_reset_offset(struct sk_buff *skb)
2582{
2583 NAPI_GRO_CB(skb)->data_offset = 0;
2584 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002585 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002586
Herbert Xu78d3fd02009-05-26 18:50:23 +00002587 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002588 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002589 NAPI_GRO_CB(skb)->frag0 =
2590 page_address(skb_shinfo(skb)->frags[0].page) +
2591 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002592 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2593 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002594}
2595EXPORT_SYMBOL(skb_gro_reset_offset);
2596
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002597int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2598{
Herbert Xu86911732009-01-29 14:19:50 +00002599 skb_gro_reset_offset(skb);
2600
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002601 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002602}
2603EXPORT_SYMBOL(napi_gro_receive);
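/*
 * Illustrative sketch (hypothetical NAPI driver, not part of this file):
 * a poll routine feeding received frames through GRO.  example_next_rx()
 * is a made-up helper standing in for the driver's RX ring handling.
 */
static struct sk_buff *example_next_rx(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = 0;
	struct sk_buff *skb;

	while (work < budget && (skb = example_next_rx(napi)) != NULL) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);
		work++;
	}

	if (work < budget) {
		napi_complete(napi);
		/* re-enable the device's RX interrupt here */
	}
	return work;
}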
2604
Herbert Xu96e93ea2009-01-06 10:49:34 -08002605void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2606{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002607 __skb_pull(skb, skb_headlen(skb));
2608 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2609
2610 napi->skb = skb;
2611}
2612EXPORT_SYMBOL(napi_reuse_skb);
2613
Herbert Xu76620aa2009-04-16 02:02:07 -07002614struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002615{
Herbert Xu5d38a072009-01-04 16:13:40 -08002616 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002617
2618 if (!skb) {
Eric Dumazet89d71a62009-10-13 05:34:20 +00002619 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
2620 if (skb)
2621 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002622 }
Herbert Xu96e93ea2009-01-06 10:49:34 -08002623 return skb;
2624}
Herbert Xu76620aa2009-04-16 02:02:07 -07002625EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002626
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002627int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2628{
2629 int err = NET_RX_SUCCESS;
2630
2631 switch (ret) {
2632 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002633 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002634 skb->protocol = eth_type_trans(skb, napi->dev);
2635
2636 if (ret == GRO_NORMAL)
2637 return netif_receive_skb(skb);
2638
2639 skb_gro_pull(skb, -ETH_HLEN);
2640 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002641
2642 case GRO_DROP:
2643 err = NET_RX_DROP;
2644 /* fall through */
2645
2646 case GRO_MERGED_FREE:
2647 napi_reuse_skb(napi, skb);
2648 break;
2649 }
2650
2651 return err;
2652}
2653EXPORT_SYMBOL(napi_frags_finish);
2654
Herbert Xu76620aa2009-04-16 02:02:07 -07002655struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002656{
Herbert Xu76620aa2009-04-16 02:02:07 -07002657 struct sk_buff *skb = napi->skb;
2658 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002659 unsigned int hlen;
2660 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002661
2662 napi->skb = NULL;
2663
2664 skb_reset_mac_header(skb);
2665 skb_gro_reset_offset(skb);
2666
Herbert Xua5b1cf22009-05-26 18:50:28 +00002667 off = skb_gro_offset(skb);
2668 hlen = off + sizeof(*eth);
2669 eth = skb_gro_header_fast(skb, off);
2670 if (skb_gro_header_hard(skb, hlen)) {
2671 eth = skb_gro_header_slow(skb, hlen, off);
2672 if (unlikely(!eth)) {
2673 napi_reuse_skb(napi, skb);
2674 skb = NULL;
2675 goto out;
2676 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002677 }
2678
2679 skb_gro_pull(skb, sizeof(*eth));
2680
2681 /*
2682 * This works because the only protocols we care about don't require
2683 * special handling. We'll fix it up properly at the end.
2684 */
2685 skb->protocol = eth->h_proto;
2686
2687out:
2688 return skb;
2689}
2690EXPORT_SYMBOL(napi_frags_skb);
2691
2692int napi_gro_frags(struct napi_struct *napi)
2693{
2694 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002695
2696 if (!skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002697 return NET_RX_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002698
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002699 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002700}
2701EXPORT_SYMBOL(napi_gro_frags);
2702
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002703static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704{
2705 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2707 unsigned long start_time = jiffies;
2708
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002709 napi->weight = weight_p;
2710 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712
2713 local_irq_disable();
2714 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002715 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002716 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002717 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002718 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002719 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002720 local_irq_enable();
2721
Herbert Xu8f1ead22009-03-26 00:59:10 -07002722 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002723 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002725 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726}
2727
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002728/**
2729 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002730 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002731 *
2732 * The entry's receive function will be scheduled to run
2733 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002734void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002735{
2736 unsigned long flags;
2737
2738 local_irq_save(flags);
2739 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2740 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2741 local_irq_restore(flags);
2742}
2743EXPORT_SYMBOL(__napi_schedule);
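
/*
 * Illustrative sketch, not part of the original file: the usual driver
 * pattern is to claim NAPI_STATE_SCHED with napi_schedule_prep() and only
 * then call __napi_schedule(), typically from the interrupt handler with
 * the device's own interrupts masked (this is what the napi_schedule()
 * helper in netdevice.h does).  The helper name below is made up.
 */
static void __maybe_unused example_kick_napi(struct napi_struct *napi)
{
	/* Only one context may own the poll; skip if already scheduled. */
	if (napi_schedule_prep(napi))
		__napi_schedule(napi);
}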
2744
Herbert Xud565b0a2008-12-15 23:38:52 -08002745void __napi_complete(struct napi_struct *n)
2746{
2747 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2748 BUG_ON(n->gro_list);
2749
2750 list_del(&n->poll_list);
2751 smp_mb__before_clear_bit();
2752 clear_bit(NAPI_STATE_SCHED, &n->state);
2753}
2754EXPORT_SYMBOL(__napi_complete);
2755
2756void napi_complete(struct napi_struct *n)
2757{
2758 unsigned long flags;
2759
2760 /*
2761 * don't let napi dequeue from the cpu poll list
 2762 * just in case it's running on a different cpu
2763 */
2764 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2765 return;
2766
2767 napi_gro_flush(n);
2768 local_irq_save(flags);
2769 __napi_complete(n);
2770 local_irq_restore(flags);
2771}
2772EXPORT_SYMBOL(napi_complete);
2773
2774void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2775 int (*poll)(struct napi_struct *, int), int weight)
2776{
2777 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002778 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002779 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002780 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002781 napi->poll = poll;
2782 napi->weight = weight;
2783 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002784 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002785#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002786 spin_lock_init(&napi->poll_lock);
2787 napi->poll_owner = -1;
2788#endif
2789 set_bit(NAPI_STATE_SCHED, &napi->state);
2790}
2791EXPORT_SYMBOL(netif_napi_add);
2792
2793void netif_napi_del(struct napi_struct *napi)
2794{
2795 struct sk_buff *skb, *next;
2796
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002797 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002798 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002799
2800 for (skb = napi->gro_list; skb; skb = next) {
2801 next = skb->next;
2802 skb->next = NULL;
2803 kfree_skb(skb);
2804 }
2805
2806 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002807 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002808}
2809EXPORT_SYMBOL(netif_napi_del);
2810
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002811
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812static void net_rx_action(struct softirq_action *h)
2813{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002814 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002815 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002816 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002817 void *have;
2818
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 local_irq_disable();
2820
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002821 while (!list_empty(list)) {
2822 struct napi_struct *n;
2823 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002825 /* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002826 * Allow this to run for 2 jiffies, which allows
2827 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002828 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002829 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 goto softnet_break;
2831
2832 local_irq_enable();
2833
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002834 /* Even though interrupts have been re-enabled, this
2835 * access is safe because interrupts can only add new
2836 * entries to the tail of this list, and only ->poll()
2837 * calls can remove this head entry from the list.
2838 */
2839 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002841 have = netpoll_poll_lock(n);
2842
2843 weight = n->weight;
2844
David S. Miller0a7606c2007-10-29 21:28:47 -07002845 /* This NAPI_STATE_SCHED test is for avoiding a race
2846 * with netpoll's poll_napi(). Only the entity which
2847 * obtains the lock and sees NAPI_STATE_SCHED set will
2848 * actually make the ->poll() call. Therefore we avoid
 2849 * accidentally calling ->poll() when NAPI is not scheduled.
2850 */
2851 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002852 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002853 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002854 trace_napi_poll(n);
2855 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002856
2857 WARN_ON_ONCE(work > weight);
2858
2859 budget -= work;
2860
2861 local_irq_disable();
2862
2863 /* Drivers must not modify the NAPI state if they
2864 * consume the entire weight. In such cases this code
2865 * still "owns" the NAPI instance and therefore can
2866 * move the instance around on the list at-will.
2867 */
David S. Millerfed17f32008-01-07 21:00:40 -08002868 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07002869 if (unlikely(napi_disable_pending(n))) {
2870 local_irq_enable();
2871 napi_complete(n);
2872 local_irq_disable();
2873 } else
David S. Millerfed17f32008-01-07 21:00:40 -08002874 list_move_tail(&n->poll_list, list);
2875 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002876
2877 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878 }
2879out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002880 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002881
Chris Leechdb217332006-06-17 21:24:58 -07002882#ifdef CONFIG_NET_DMA
2883 /*
2884 * There may not be any more sk_buffs coming right now, so push
2885 * any pending DMA copies to hardware
2886 */
Dan Williams2ba05622009-01-06 11:38:14 -07002887 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002888#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002889
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 return;
2891
2892softnet_break:
2893 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2894 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2895 goto out;
2896}
2897
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002898static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899
2900/**
 2901 * register_gifconf - register a SIOCGIFCONF handler
2902 * @family: Address family
2903 * @gifconf: Function handler
2904 *
2905 * Register protocol dependent address dumping routines. The handler
2906 * that is passed must not be freed or reused until it has been replaced
2907 * by another handler.
2908 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002909int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910{
2911 if (family >= NPROTO)
2912 return -EINVAL;
2913 gifconf_list[family] = gifconf;
2914 return 0;
2915}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002916EXPORT_SYMBOL(register_gifconf);
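
/*
 * Illustrative sketch, not part of the original file: a protocol family
 * would register a dumper like the one below with register_gifconf() at
 * init time.  With a NULL buffer the call is a sizing pass; otherwise the
 * handler copies struct ifreq entries for @dev into the user buffer and
 * returns the number of bytes used.  The handler name and its trivial
 * body are hypothetical.
 */
static int __maybe_unused example_gifconf(struct net_device *dev,
					  char __user *buf, int len)
{
	/* No addresses to report in this sketch. */
	return 0;
}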
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917
2918
2919/*
2920 * Map an interface index to its name (SIOCGIFNAME)
2921 */
2922
2923/*
2924 * We need this ioctl for efficient implementation of the
2925 * if_indextoname() function required by the IPv6 API. Without
2926 * it, we would have to search all the interfaces to find a
2927 * match. --pb
2928 */
2929
Eric W. Biederman881d9662007-09-17 11:56:21 -07002930static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931{
2932 struct net_device *dev;
2933 struct ifreq ifr;
2934
2935 /*
2936 * Fetch the caller's info block.
2937 */
2938
2939 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2940 return -EFAULT;
2941
2942 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002943 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 if (!dev) {
2945 read_unlock(&dev_base_lock);
2946 return -ENODEV;
2947 }
2948
2949 strcpy(ifr.ifr_name, dev->name);
2950 read_unlock(&dev_base_lock);
2951
2952 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2953 return -EFAULT;
2954 return 0;
2955}
2956
2957/*
2958 * Perform a SIOCGIFCONF call. This structure will change
2959 * size eventually, and there is nothing I can do about it.
2960 * Thus we will need a 'compatibility mode'.
2961 */
2962
Eric W. Biederman881d9662007-09-17 11:56:21 -07002963static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964{
2965 struct ifconf ifc;
2966 struct net_device *dev;
2967 char __user *pos;
2968 int len;
2969 int total;
2970 int i;
2971
2972 /*
2973 * Fetch the caller's info block.
2974 */
2975
2976 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2977 return -EFAULT;
2978
2979 pos = ifc.ifc_buf;
2980 len = ifc.ifc_len;
2981
2982 /*
2983 * Loop over the interfaces, and write an info block for each.
2984 */
2985
2986 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002987 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 for (i = 0; i < NPROTO; i++) {
2989 if (gifconf_list[i]) {
2990 int done;
2991 if (!pos)
2992 done = gifconf_list[i](dev, NULL, 0);
2993 else
2994 done = gifconf_list[i](dev, pos + total,
2995 len - total);
2996 if (done < 0)
2997 return -EFAULT;
2998 total += done;
2999 }
3000 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003001 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003002
3003 /*
3004 * All done. Write the updated control block back to the caller.
3005 */
3006 ifc.ifc_len = total;
3007
3008 /*
3009 * Both BSD and Solaris return 0 here, so we do too.
3010 */
3011 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3012}
3013
3014#ifdef CONFIG_PROC_FS
3015/*
3016 * This is invoked by the /proc filesystem handler to display a device
3017 * in detail.
3018 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08003020 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021{
Denis V. Luneve372c412007-11-19 22:31:54 -08003022 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003023 loff_t off;
3024 struct net_device *dev;
3025
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003027 if (!*pos)
3028 return SEQ_START_TOKEN;
3029
3030 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003031 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003032 if (off++ == *pos)
3033 return dev;
3034
3035 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036}
3037
3038void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3039{
Denis V. Luneve372c412007-11-19 22:31:54 -08003040 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003041 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07003042 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07003043 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044}
3045
3046void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08003047 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048{
3049 read_unlock(&dev_base_lock);
3050}
3051
3052static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3053{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003054 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055
Rusty Russell5a1b5892007-04-28 21:04:03 -07003056 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3057 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3058 dev->name, stats->rx_bytes, stats->rx_packets,
3059 stats->rx_errors,
3060 stats->rx_dropped + stats->rx_missed_errors,
3061 stats->rx_fifo_errors,
3062 stats->rx_length_errors + stats->rx_over_errors +
3063 stats->rx_crc_errors + stats->rx_frame_errors,
3064 stats->rx_compressed, stats->multicast,
3065 stats->tx_bytes, stats->tx_packets,
3066 stats->tx_errors, stats->tx_dropped,
3067 stats->tx_fifo_errors, stats->collisions,
3068 stats->tx_carrier_errors +
3069 stats->tx_aborted_errors +
3070 stats->tx_window_errors +
3071 stats->tx_heartbeat_errors,
3072 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073}
3074
3075/*
3076 * Called from the PROCfs module. This now uses the new arbitrary sized
3077 * /proc/net interface to create /proc/net/dev
3078 */
3079static int dev_seq_show(struct seq_file *seq, void *v)
3080{
3081 if (v == SEQ_START_TOKEN)
3082 seq_puts(seq, "Inter-| Receive "
3083 " | Transmit\n"
3084 " face |bytes packets errs drop fifo frame "
3085 "compressed multicast|bytes packets errs "
3086 "drop fifo colls carrier compressed\n");
3087 else
3088 dev_seq_printf_stats(seq, v);
3089 return 0;
3090}
3091
3092static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3093{
3094 struct netif_rx_stats *rc = NULL;
3095
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003096 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003097 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098 rc = &per_cpu(netdev_rx_stat, *pos);
3099 break;
3100 } else
3101 ++*pos;
3102 return rc;
3103}
3104
3105static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3106{
3107 return softnet_get_online(pos);
3108}
3109
3110static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3111{
3112 ++*pos;
3113 return softnet_get_online(pos);
3114}
3115
3116static void softnet_seq_stop(struct seq_file *seq, void *v)
3117{
3118}
3119
3120static int softnet_seq_show(struct seq_file *seq, void *v)
3121{
3122 struct netif_rx_stats *s = v;
3123
3124 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003125 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003126 0, 0, 0, 0, /* was fastroute */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003127 s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128 return 0;
3129}
3130
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003131static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003132 .start = dev_seq_start,
3133 .next = dev_seq_next,
3134 .stop = dev_seq_stop,
3135 .show = dev_seq_show,
3136};
3137
3138static int dev_seq_open(struct inode *inode, struct file *file)
3139{
Denis V. Luneve372c412007-11-19 22:31:54 -08003140 return seq_open_net(inode, file, &dev_seq_ops,
3141 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142}
3143
Arjan van de Ven9a321442007-02-12 00:55:35 -08003144static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145 .owner = THIS_MODULE,
3146 .open = dev_seq_open,
3147 .read = seq_read,
3148 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003149 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003150};
3151
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003152static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153 .start = softnet_seq_start,
3154 .next = softnet_seq_next,
3155 .stop = softnet_seq_stop,
3156 .show = softnet_seq_show,
3157};
3158
3159static int softnet_seq_open(struct inode *inode, struct file *file)
3160{
3161 return seq_open(file, &softnet_seq_ops);
3162}
3163
Arjan van de Ven9a321442007-02-12 00:55:35 -08003164static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 .owner = THIS_MODULE,
3166 .open = softnet_seq_open,
3167 .read = seq_read,
3168 .llseek = seq_lseek,
3169 .release = seq_release,
3170};
3171
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003172static void *ptype_get_idx(loff_t pos)
3173{
3174 struct packet_type *pt = NULL;
3175 loff_t i = 0;
3176 int t;
3177
3178 list_for_each_entry_rcu(pt, &ptype_all, list) {
3179 if (i == pos)
3180 return pt;
3181 ++i;
3182 }
3183
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003184 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003185 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3186 if (i == pos)
3187 return pt;
3188 ++i;
3189 }
3190 }
3191 return NULL;
3192}
3193
3194static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003195 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003196{
3197 rcu_read_lock();
3198 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3199}
3200
3201static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3202{
3203 struct packet_type *pt;
3204 struct list_head *nxt;
3205 int hash;
3206
3207 ++*pos;
3208 if (v == SEQ_START_TOKEN)
3209 return ptype_get_idx(0);
3210
3211 pt = v;
3212 nxt = pt->list.next;
3213 if (pt->type == htons(ETH_P_ALL)) {
3214 if (nxt != &ptype_all)
3215 goto found;
3216 hash = 0;
3217 nxt = ptype_base[0].next;
3218 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003219 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003220
3221 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003222 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003223 return NULL;
3224 nxt = ptype_base[hash].next;
3225 }
3226found:
3227 return list_entry(nxt, struct packet_type, list);
3228}
3229
3230static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003231 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003232{
3233 rcu_read_unlock();
3234}
3235
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003236static int ptype_seq_show(struct seq_file *seq, void *v)
3237{
3238 struct packet_type *pt = v;
3239
3240 if (v == SEQ_START_TOKEN)
3241 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003242 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003243 if (pt->type == htons(ETH_P_ALL))
3244 seq_puts(seq, "ALL ");
3245 else
3246 seq_printf(seq, "%04x", ntohs(pt->type));
3247
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003248 seq_printf(seq, " %-8s %pF\n",
3249 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003250 }
3251
3252 return 0;
3253}
3254
3255static const struct seq_operations ptype_seq_ops = {
3256 .start = ptype_seq_start,
3257 .next = ptype_seq_next,
3258 .stop = ptype_seq_stop,
3259 .show = ptype_seq_show,
3260};
3261
3262static int ptype_seq_open(struct inode *inode, struct file *file)
3263{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003264 return seq_open_net(inode, file, &ptype_seq_ops,
3265 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003266}
3267
3268static const struct file_operations ptype_seq_fops = {
3269 .owner = THIS_MODULE,
3270 .open = ptype_seq_open,
3271 .read = seq_read,
3272 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003273 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003274};
3275
3276
Pavel Emelyanov46650792007-10-08 20:38:39 -07003277static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278{
3279 int rc = -ENOMEM;
3280
Eric W. Biederman881d9662007-09-17 11:56:21 -07003281 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003283 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003285 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003286 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003287
Eric W. Biederman881d9662007-09-17 11:56:21 -07003288 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003289 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290 rc = 0;
3291out:
3292 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003293out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003294 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003296 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003298 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299 goto out;
3300}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003301
Pavel Emelyanov46650792007-10-08 20:38:39 -07003302static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003303{
3304 wext_proc_exit(net);
3305
3306 proc_net_remove(net, "ptype");
3307 proc_net_remove(net, "softnet_stat");
3308 proc_net_remove(net, "dev");
3309}
3310
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003311static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003312 .init = dev_proc_net_init,
3313 .exit = dev_proc_net_exit,
3314};
3315
3316static int __init dev_proc_init(void)
3317{
3318 return register_pernet_subsys(&dev_proc_ops);
3319}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320#else
3321#define dev_proc_init() 0
3322#endif /* CONFIG_PROC_FS */
3323
3324
3325/**
3326 * netdev_set_master - set up master/slave pair
3327 * @slave: slave device
3328 * @master: new master device
3329 *
3330 * Changes the master device of the slave. Pass %NULL to break the
3331 * bonding. The caller must hold the RTNL semaphore. On a failure
3332 * a negative errno code is returned. On success the reference counts
3333 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3334 * function returns zero.
3335 */
3336int netdev_set_master(struct net_device *slave, struct net_device *master)
3337{
3338 struct net_device *old = slave->master;
3339
3340 ASSERT_RTNL();
3341
3342 if (master) {
3343 if (old)
3344 return -EBUSY;
3345 dev_hold(master);
3346 }
3347
3348 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003349
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350 synchronize_net();
3351
3352 if (old)
3353 dev_put(old);
3354
3355 if (master)
3356 slave->flags |= IFF_SLAVE;
3357 else
3358 slave->flags &= ~IFF_SLAVE;
3359
3360 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3361 return 0;
3362}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003363EXPORT_SYMBOL(netdev_set_master);
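
/*
 * Illustrative sketch, not part of the original file: a bonding-style
 * driver pairs and unpairs devices under RTNL roughly as below.  The
 * helper name is made up; driver-specific slave setup is omitted.
 */
static int __maybe_unused example_enslave(struct net_device *master,
					  struct net_device *slave)
{
	int err;

	ASSERT_RTNL();
	err = netdev_set_master(slave, master);	/* NULL would unpair */
	if (err)
		return err;
	/* Hardware/driver specific slave setup would go here. */
	return 0;
}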
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003365static void dev_change_rx_flags(struct net_device *dev, int flags)
3366{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003367 const struct net_device_ops *ops = dev->netdev_ops;
3368
3369 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3370 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003371}
3372
Wang Chendad9b332008-06-18 01:48:28 -07003373static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003374{
3375 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003376 uid_t uid;
3377 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003378
Patrick McHardy24023452007-07-14 18:51:31 -07003379 ASSERT_RTNL();
3380
Wang Chendad9b332008-06-18 01:48:28 -07003381 dev->flags |= IFF_PROMISC;
3382 dev->promiscuity += inc;
3383 if (dev->promiscuity == 0) {
3384 /*
3385 * Avoid overflow.
3386 * If inc causes overflow, untouch promisc and return error.
3387 */
3388 if (inc < 0)
3389 dev->flags &= ~IFF_PROMISC;
3390 else {
3391 dev->promiscuity -= inc;
 3392 printk(KERN_WARNING "%s: promiscuity counter overflow, "
 3393 "promiscuity was not changed; the promiscuous mode "
 3394 "support of this device may be broken.\n", dev->name);
3395 return -EOVERFLOW;
3396 }
3397 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003398 if (dev->flags != old_flags) {
3399 printk(KERN_INFO "device %s %s promiscuous mode\n",
3400 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3401 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003402 if (audit_enabled) {
3403 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003404 audit_log(current->audit_context, GFP_ATOMIC,
3405 AUDIT_ANOM_PROMISCUOUS,
3406 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3407 dev->name, (dev->flags & IFF_PROMISC),
3408 (old_flags & IFF_PROMISC),
3409 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003410 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003411 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003412 }
Patrick McHardy24023452007-07-14 18:51:31 -07003413
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003414 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003415 }
Wang Chendad9b332008-06-18 01:48:28 -07003416 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003417}
3418
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419/**
3420 * dev_set_promiscuity - update promiscuity count on a device
3421 * @dev: device
3422 * @inc: modifier
3423 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003424 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425 * remains above zero the interface remains promiscuous. Once it hits zero
3426 * the device reverts back to normal filtering operation. A negative inc
3427 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003428 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429 */
Wang Chendad9b332008-06-18 01:48:28 -07003430int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431{
3432 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003433 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434
Wang Chendad9b332008-06-18 01:48:28 -07003435 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003436 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003437 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003438 if (dev->flags != old_flags)
3439 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003440 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003442EXPORT_SYMBOL(dev_set_promiscuity);
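
/*
 * Illustrative sketch, not part of the original file: a packet-capture
 * style user bumps the promiscuity count while it needs all traffic and
 * drops it again when done, always under RTNL.  The helper name is made up.
 */
static int __maybe_unused example_set_capture(struct net_device *dev, bool on)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, on ? 1 : -1);
	rtnl_unlock();
	return err;
}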
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443
3444/**
3445 * dev_set_allmulti - update allmulti count on a device
3446 * @dev: device
3447 * @inc: modifier
3448 *
3449 * Add or remove reception of all multicast frames to a device. While the
3450 * count in the device remains above zero the interface remains listening
3451 * to all interfaces. Once it hits zero the device reverts back to normal
3452 * filtering operation. A negative @inc value is used to drop the counter
3453 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003454 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003455 */
3456
Wang Chendad9b332008-06-18 01:48:28 -07003457int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458{
3459 unsigned short old_flags = dev->flags;
3460
Patrick McHardy24023452007-07-14 18:51:31 -07003461 ASSERT_RTNL();
3462
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003464 dev->allmulti += inc;
3465 if (dev->allmulti == 0) {
3466 /*
3467 * Avoid overflow.
3468 * If inc causes overflow, untouch allmulti and return error.
3469 */
3470 if (inc < 0)
3471 dev->flags &= ~IFF_ALLMULTI;
3472 else {
3473 dev->allmulti -= inc;
 3474 printk(KERN_WARNING "%s: allmulti counter overflow, "
 3475 "allmulti was not changed; the allmulti support "
 3476 "of this device may be broken.\n", dev->name);
3477 return -EOVERFLOW;
3478 }
3479 }
Patrick McHardy24023452007-07-14 18:51:31 -07003480 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003481 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003482 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003483 }
Wang Chendad9b332008-06-18 01:48:28 -07003484 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003485}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003486EXPORT_SYMBOL(dev_set_allmulti);
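
/*
 * Illustrative sketch, not part of the original file: a stacked device
 * that must see every multicast frame from its lower device increments
 * the allmulti count on open and decrements it on stop.  The helper name
 * is made up; the caller is assumed to hold RTNL already.
 */
static int __maybe_unused example_lower_allmulti(struct net_device *lower,
						 bool on)
{
	ASSERT_RTNL();
	return dev_set_allmulti(lower, on ? 1 : -1);
}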
Patrick McHardy4417da62007-06-27 01:28:10 -07003487
3488/*
3489 * Upload unicast and multicast address lists to device and
3490 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003491 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003492 * are present.
3493 */
3494void __dev_set_rx_mode(struct net_device *dev)
3495{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003496 const struct net_device_ops *ops = dev->netdev_ops;
3497
Patrick McHardy4417da62007-06-27 01:28:10 -07003498 /* dev_open will call this function so the list will stay sane. */
3499 if (!(dev->flags&IFF_UP))
3500 return;
3501
3502 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003503 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003504
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003505 if (ops->ndo_set_rx_mode)
3506 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003507 else {
 3508 /* Unicast address changes may only happen under the rtnl,
3509 * therefore calling __dev_set_promiscuity here is safe.
3510 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003511 if (dev->uc.count > 0 && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003512 __dev_set_promiscuity(dev, 1);
3513 dev->uc_promisc = 1;
Jiri Pirko31278e72009-06-17 01:12:19 +00003514 } else if (dev->uc.count == 0 && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003515 __dev_set_promiscuity(dev, -1);
3516 dev->uc_promisc = 0;
3517 }
3518
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003519 if (ops->ndo_set_multicast_list)
3520 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003521 }
3522}
3523
3524void dev_set_rx_mode(struct net_device *dev)
3525{
David S. Millerb9e40852008-07-15 00:15:08 -07003526 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003527 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003528 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529}
3530
Jiri Pirkof001fde2009-05-05 02:48:28 +00003531/* hw addresses list handling functions */
3532
Jiri Pirko31278e72009-06-17 01:12:19 +00003533static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3534 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003535{
3536 struct netdev_hw_addr *ha;
3537 int alloc_size;
3538
3539 if (addr_len > MAX_ADDR_LEN)
3540 return -EINVAL;
3541
Jiri Pirko31278e72009-06-17 01:12:19 +00003542 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003543 if (!memcmp(ha->addr, addr, addr_len) &&
3544 ha->type == addr_type) {
3545 ha->refcount++;
3546 return 0;
3547 }
3548 }
3549
3550
Jiri Pirkof001fde2009-05-05 02:48:28 +00003551 alloc_size = sizeof(*ha);
3552 if (alloc_size < L1_CACHE_BYTES)
3553 alloc_size = L1_CACHE_BYTES;
3554 ha = kmalloc(alloc_size, GFP_ATOMIC);
3555 if (!ha)
3556 return -ENOMEM;
3557 memcpy(ha->addr, addr, addr_len);
3558 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003559 ha->refcount = 1;
3560 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003561 list_add_tail_rcu(&ha->list, &list->list);
3562 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003563 return 0;
3564}
3565
3566static void ha_rcu_free(struct rcu_head *head)
3567{
3568 struct netdev_hw_addr *ha;
3569
3570 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3571 kfree(ha);
3572}
3573
Jiri Pirko31278e72009-06-17 01:12:19 +00003574static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3575 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003576{
3577 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003578
Jiri Pirko31278e72009-06-17 01:12:19 +00003579 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003580 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003581 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003582 if (--ha->refcount)
3583 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003584 list_del_rcu(&ha->list);
3585 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003586 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003587 return 0;
3588 }
3589 }
3590 return -ENOENT;
3591}
3592
Jiri Pirko31278e72009-06-17 01:12:19 +00003593static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3594 struct netdev_hw_addr_list *from_list,
3595 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003596 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003597{
3598 int err;
3599 struct netdev_hw_addr *ha, *ha2;
3600 unsigned char type;
3601
Jiri Pirko31278e72009-06-17 01:12:19 +00003602 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003603 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003604 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003605 if (err)
3606 goto unroll;
3607 }
3608 return 0;
3609
3610unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00003611 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003612 if (ha2 == ha)
3613 break;
3614 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003615 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003616 }
3617 return err;
3618}
3619
Jiri Pirko31278e72009-06-17 01:12:19 +00003620static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3621 struct netdev_hw_addr_list *from_list,
3622 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003623 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003624{
3625 struct netdev_hw_addr *ha;
3626 unsigned char type;
3627
Jiri Pirko31278e72009-06-17 01:12:19 +00003628 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003629 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003630 __hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003631 }
3632}
3633
Jiri Pirko31278e72009-06-17 01:12:19 +00003634static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3635 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003636 int addr_len)
3637{
3638 int err = 0;
3639 struct netdev_hw_addr *ha, *tmp;
3640
Jiri Pirko31278e72009-06-17 01:12:19 +00003641 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003642 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003643 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003644 addr_len, ha->type);
3645 if (err)
3646 break;
3647 ha->synced = true;
3648 ha->refcount++;
3649 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003650 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3651 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003652 }
3653 }
3654 return err;
3655}
3656
Jiri Pirko31278e72009-06-17 01:12:19 +00003657static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3658 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003659 int addr_len)
3660{
3661 struct netdev_hw_addr *ha, *tmp;
3662
Jiri Pirko31278e72009-06-17 01:12:19 +00003663 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003664 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003665 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003666 addr_len, ha->type);
3667 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003668 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003669 addr_len, ha->type);
3670 }
3671 }
3672}
3673
Jiri Pirko31278e72009-06-17 01:12:19 +00003674static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003675{
3676 struct netdev_hw_addr *ha, *tmp;
3677
Jiri Pirko31278e72009-06-17 01:12:19 +00003678 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003679 list_del_rcu(&ha->list);
3680 call_rcu(&ha->rcu_head, ha_rcu_free);
3681 }
Jiri Pirko31278e72009-06-17 01:12:19 +00003682 list->count = 0;
3683}
3684
3685static void __hw_addr_init(struct netdev_hw_addr_list *list)
3686{
3687 INIT_LIST_HEAD(&list->list);
3688 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003689}
3690
3691/* Device addresses handling functions */
3692
3693static void dev_addr_flush(struct net_device *dev)
3694{
3695 /* rtnl_mutex must be held here */
3696
Jiri Pirko31278e72009-06-17 01:12:19 +00003697 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003698 dev->dev_addr = NULL;
3699}
3700
3701static int dev_addr_init(struct net_device *dev)
3702{
3703 unsigned char addr[MAX_ADDR_LEN];
3704 struct netdev_hw_addr *ha;
3705 int err;
3706
3707 /* rtnl_mutex must be held here */
3708
Jiri Pirko31278e72009-06-17 01:12:19 +00003709 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00003710 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00003711 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003712 NETDEV_HW_ADDR_T_LAN);
3713 if (!err) {
3714 /*
3715 * Get the first (previously created) address from the list
3716 * and set dev_addr pointer to this location.
3717 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003718 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003719 struct netdev_hw_addr, list);
3720 dev->dev_addr = ha->addr;
3721 }
3722 return err;
3723}
3724
3725/**
3726 * dev_addr_add - Add a device address
3727 * @dev: device
3728 * @addr: address to add
3729 * @addr_type: address type
3730 *
3731 * Add a device address to the device or increase the reference count if
3732 * it already exists.
3733 *
3734 * The caller must hold the rtnl_mutex.
3735 */
3736int dev_addr_add(struct net_device *dev, unsigned char *addr,
3737 unsigned char addr_type)
3738{
3739 int err;
3740
3741 ASSERT_RTNL();
3742
Jiri Pirko31278e72009-06-17 01:12:19 +00003743 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003744 if (!err)
3745 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3746 return err;
3747}
3748EXPORT_SYMBOL(dev_addr_add);
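
/*
 * Illustrative sketch, not part of the original file: adding a secondary
 * hardware address of dev->addr_len bytes under RTNL.  The helper name is
 * made up; @addr must point at addr_len valid bytes, and dev_addr_del()
 * with the same arguments would release the reference again.
 */
static int __maybe_unused example_add_secondary_lan_addr(struct net_device *dev,
							 unsigned char *addr)
{
	int err;

	rtnl_lock();
	err = dev_addr_add(dev, addr, NETDEV_HW_ADDR_T_LAN);
	rtnl_unlock();
	return err;
}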
3749
3750/**
3751 * dev_addr_del - Release a device address.
3752 * @dev: device
3753 * @addr: address to delete
3754 * @addr_type: address type
3755 *
3756 * Release reference to a device address and remove it from the device
3757 * if the reference count drops to zero.
3758 *
3759 * The caller must hold the rtnl_mutex.
3760 */
3761int dev_addr_del(struct net_device *dev, unsigned char *addr,
3762 unsigned char addr_type)
3763{
3764 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003765 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003766
3767 ASSERT_RTNL();
3768
Jiri Pirkoccffad252009-05-22 23:22:17 +00003769 /*
 3770 * We cannot remove the first address from the list because
 3771 * dev->dev_addr points to it.
3772 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003773 ha = list_first_entry(&dev->dev_addrs.list,
3774 struct netdev_hw_addr, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003775 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3776 return -ENOENT;
3777
Jiri Pirko31278e72009-06-17 01:12:19 +00003778 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003779 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003780 if (!err)
3781 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3782 return err;
3783}
3784EXPORT_SYMBOL(dev_addr_del);
3785
3786/**
3787 * dev_addr_add_multiple - Add device addresses from another device
3788 * @to_dev: device to which addresses will be added
3789 * @from_dev: device from which addresses will be added
 3790 * @addr_type: address type - 0 means the type will be taken from @from_dev
 3791 *
 3792 * Add the device addresses of one device to another.
 3793 *
3794 * The caller must hold the rtnl_mutex.
3795 */
3796int dev_addr_add_multiple(struct net_device *to_dev,
3797 struct net_device *from_dev,
3798 unsigned char addr_type)
3799{
3800 int err;
3801
3802 ASSERT_RTNL();
3803
3804 if (from_dev->addr_len != to_dev->addr_len)
3805 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003806 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003807 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003808 if (!err)
3809 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3810 return err;
3811}
3812EXPORT_SYMBOL(dev_addr_add_multiple);
3813
3814/**
 3815 * dev_addr_del_multiple - Delete device addresses listed on another device
 3816 * @to_dev: device from which the addresses will be deleted
 3817 * @from_dev: device whose addresses are to be deleted from @to_dev
 3818 * @addr_type: address type - 0 means the type will be taken from @from_dev
 3819 *
 3820 * Deletes the addresses in @to_dev that appear in @from_dev's address list.
3821 *
3822 * The caller must hold the rtnl_mutex.
3823 */
3824int dev_addr_del_multiple(struct net_device *to_dev,
3825 struct net_device *from_dev,
3826 unsigned char addr_type)
3827{
3828 ASSERT_RTNL();
3829
3830 if (from_dev->addr_len != to_dev->addr_len)
3831 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003832 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003833 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003834 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3835 return 0;
3836}
3837EXPORT_SYMBOL(dev_addr_del_multiple);
3838
Jiri Pirko31278e72009-06-17 01:12:19 +00003839/* multicast addresses handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00003840
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003841int __dev_addr_delete(struct dev_addr_list **list, int *count,
3842 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003843{
3844 struct dev_addr_list *da;
3845
3846 for (; (da = *list) != NULL; list = &da->next) {
3847 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3848 alen == da->da_addrlen) {
3849 if (glbl) {
3850 int old_glbl = da->da_gusers;
3851 da->da_gusers = 0;
3852 if (old_glbl == 0)
3853 break;
3854 }
3855 if (--da->da_users)
3856 return 0;
3857
3858 *list = da->next;
3859 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003860 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003861 return 0;
3862 }
3863 }
3864 return -ENOENT;
3865}
3866
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003867int __dev_addr_add(struct dev_addr_list **list, int *count,
3868 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003869{
3870 struct dev_addr_list *da;
3871
3872 for (da = *list; da != NULL; da = da->next) {
3873 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3874 da->da_addrlen == alen) {
3875 if (glbl) {
3876 int old_glbl = da->da_gusers;
3877 da->da_gusers = 1;
3878 if (old_glbl)
3879 return 0;
3880 }
3881 da->da_users++;
3882 return 0;
3883 }
3884 }
3885
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003886 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003887 if (da == NULL)
3888 return -ENOMEM;
3889 memcpy(da->da_addr, addr, alen);
3890 da->da_addrlen = alen;
3891 da->da_users = 1;
3892 da->da_gusers = glbl ? 1 : 0;
3893 da->next = *list;
3894 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003895 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003896 return 0;
3897}
3898
Patrick McHardy4417da62007-06-27 01:28:10 -07003899/**
3900 * dev_unicast_delete - Release secondary unicast address.
3901 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003902 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07003903 *
3904 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003905 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003906 *
3907 * The caller must hold the rtnl_mutex.
3908 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003909int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003910{
3911 int err;
3912
3913 ASSERT_RTNL();
3914
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003915 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00003916 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3917 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003918 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003919 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003920 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003921 return err;
3922}
3923EXPORT_SYMBOL(dev_unicast_delete);
3924
3925/**
3926 * dev_unicast_add - add a secondary unicast address
3927 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003928 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07003929 *
3930 * Add a secondary unicast address to the device or increase
3931 * the reference count if it already exists.
3932 *
3933 * The caller must hold the rtnl_mutex.
3934 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003935int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003936{
3937 int err;
3938
3939 ASSERT_RTNL();
3940
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003941 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00003942 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
3943 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003944 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003945 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003946 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003947 return err;
3948}
3949EXPORT_SYMBOL(dev_unicast_add);
3950
Chris Leeche83a2ea2008-01-31 16:53:23 -08003951int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3952 struct dev_addr_list **from, int *from_count)
3953{
3954 struct dev_addr_list *da, *next;
3955 int err = 0;
3956
3957 da = *from;
3958 while (da != NULL) {
3959 next = da->next;
3960 if (!da->da_synced) {
3961 err = __dev_addr_add(to, to_count,
3962 da->da_addr, da->da_addrlen, 0);
3963 if (err < 0)
3964 break;
3965 da->da_synced = 1;
3966 da->da_users++;
3967 } else if (da->da_users == 1) {
3968 __dev_addr_delete(to, to_count,
3969 da->da_addr, da->da_addrlen, 0);
3970 __dev_addr_delete(from, from_count,
3971 da->da_addr, da->da_addrlen, 0);
3972 }
3973 da = next;
3974 }
3975 return err;
3976}
Johannes Bergc4029082009-06-17 17:43:30 +02003977EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003978
3979void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3980 struct dev_addr_list **from, int *from_count)
3981{
3982 struct dev_addr_list *da, *next;
3983
3984 da = *from;
3985 while (da != NULL) {
3986 next = da->next;
3987 if (da->da_synced) {
3988 __dev_addr_delete(to, to_count,
3989 da->da_addr, da->da_addrlen, 0);
3990 da->da_synced = 0;
3991 __dev_addr_delete(from, from_count,
3992 da->da_addr, da->da_addrlen, 0);
3993 }
3994 da = next;
3995 }
3996}
Johannes Bergc4029082009-06-17 17:43:30 +02003997EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003998
3999/**
4000 * dev_unicast_sync - Synchronize device's unicast list to another device
4001 * @to: destination device
4002 * @from: source device
4003 *
4004 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004005 * addresses that have no users left. The source device must be
 4006 * locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08004007 *
4008 * This function is intended to be called from the dev->set_rx_mode
4009 * function of layered software devices.
4010 */
4011int dev_unicast_sync(struct net_device *to, struct net_device *from)
4012{
4013 int err = 0;
4014
Jiri Pirkoccffad252009-05-22 23:22:17 +00004015 if (to->addr_len != from->addr_len)
4016 return -EINVAL;
4017
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004018 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004019 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004020 if (!err)
4021 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004022 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004023 return err;
4024}
4025EXPORT_SYMBOL(dev_unicast_sync);
4026
4027/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004028 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004029 * @to: destination device
4030 * @from: source device
4031 *
4032 * Remove all addresses that were added to the destination device by
4033 * dev_unicast_sync(). This function is intended to be called from the
4034 * dev->stop function of layered software devices.
4035 */
4036void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4037{
Jiri Pirkoccffad252009-05-22 23:22:17 +00004038 if (to->addr_len != from->addr_len)
4039 return;
4040
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004041 netif_addr_lock_bh(from);
4042 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004043 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004044 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004045 netif_addr_unlock(to);
4046 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004047}
4048EXPORT_SYMBOL(dev_unicast_unsync);
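
/*
 * Illustrative sketch, not part of the original file: a vlan/macvlan style
 * upper device propagates its secondary unicast list to the lower device
 * from its rx-mode hook and removes it again when it stops.  Both devices
 * must share the same addr_len.  The helper names and the upper/lower
 * pairing are hypothetical.
 */
static int __maybe_unused example_upper_rx_mode(struct net_device *upper,
						struct net_device *lower)
{
	return dev_unicast_sync(lower, upper);
}

static void __maybe_unused example_upper_stop(struct net_device *upper,
					      struct net_device *lower)
{
	dev_unicast_unsync(lower, upper);
}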
4049
Jiri Pirkoccffad252009-05-22 23:22:17 +00004050static void dev_unicast_flush(struct net_device *dev)
4051{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004052 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004053 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004054 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004055}
4056
4057static void dev_unicast_init(struct net_device *dev)
4058{
Jiri Pirko31278e72009-06-17 01:12:19 +00004059 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004060}
4061
4062
Denis Cheng12972622007-07-18 02:12:56 -07004063static void __dev_addr_discard(struct dev_addr_list **list)
4064{
4065 struct dev_addr_list *tmp;
4066
4067 while (*list != NULL) {
4068 tmp = *list;
4069 *list = tmp->next;
4070 if (tmp->da_users > tmp->da_gusers)
 4071 printk(KERN_ERR "__dev_addr_discard: address leakage! "
4072 "da_users=%d\n", tmp->da_users);
4073 kfree(tmp);
4074 }
4075}
4076
Denis Cheng26cc2522007-07-18 02:12:03 -07004077static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004078{
David S. Millerb9e40852008-07-15 00:15:08 -07004079 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004080
Denis Cheng456ad752007-07-18 02:10:54 -07004081 __dev_addr_discard(&dev->mc_list);
4082 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004083
David S. Millerb9e40852008-07-15 00:15:08 -07004084 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004085}
4086
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004087/**
4088 * dev_get_flags - get flags reported to userspace
4089 * @dev: device
4090 *
4091 * Get the combination of flag bits exported through APIs to userspace.
4092 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093unsigned dev_get_flags(const struct net_device *dev)
4094{
4095 unsigned flags;
4096
4097 flags = (dev->flags & ~(IFF_PROMISC |
4098 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004099 IFF_RUNNING |
4100 IFF_LOWER_UP |
4101 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102 (dev->gflags & (IFF_PROMISC |
4103 IFF_ALLMULTI));
4104
Stefan Rompfb00055a2006-03-20 17:09:11 -08004105 if (netif_running(dev)) {
4106 if (netif_oper_up(dev))
4107 flags |= IFF_RUNNING;
4108 if (netif_carrier_ok(dev))
4109 flags |= IFF_LOWER_UP;
4110 if (netif_dormant(dev))
4111 flags |= IFF_DORMANT;
4112 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004113
4114 return flags;
4115}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004116EXPORT_SYMBOL(dev_get_flags);
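/*
 * Illustrative sketch (not part of dev.c): reading the userspace-visible
 * flag combination with dev_get_flags().  The device pointer is assumed to
 * be valid, e.g. looked up under RTNL or dev_base_lock.
 */
static void my_report_link_flags(struct net_device *dev)	/* hypothetical */
{
	unsigned flags = dev_get_flags(dev);

	if (flags & IFF_UP)
		printk(KERN_DEBUG "%s is administratively up\n", dev->name);
	if (flags & IFF_RUNNING)
		printk(KERN_DEBUG "%s is operationally up\n", dev->name);
	if (flags & IFF_LOWER_UP)
		printk(KERN_DEBUG "%s has carrier\n", dev->name);
}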
Linus Torvalds1da177e2005-04-16 15:20:36 -07004117
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004118/**
4119 * dev_change_flags - change device settings
4120 * @dev: device
4121 * @flags: device state flags
4122 *
4123 * Change settings on device based state flags. The flags are
4124 * in the userspace exported format.
4125 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004126int dev_change_flags(struct net_device *dev, unsigned flags)
4127{
Thomas Graf7c355f52007-06-05 16:03:03 -07004128 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129 int old_flags = dev->flags;
4130
Patrick McHardy24023452007-07-14 18:51:31 -07004131 ASSERT_RTNL();
4132
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133 /*
4134 * Set the flags on our device.
4135 */
4136
4137 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4138 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4139 IFF_AUTOMEDIA)) |
4140 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4141 IFF_ALLMULTI));
4142
4143 /*
4144 * Load in the correct multicast list now the flags have changed.
4145 */
4146
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004147 if ((old_flags ^ flags) & IFF_MULTICAST)
4148 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004149
Patrick McHardy4417da62007-06-27 01:28:10 -07004150 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151
4152 /*
 4153	 * Have we downed the interface?  We handle IFF_UP ourselves
4154 * according to user attempts to set it, rather than blindly
4155 * setting it.
4156 */
4157
4158 ret = 0;
4159 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4160 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4161
4162 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004163 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164 }
4165
4166 if (dev->flags & IFF_UP &&
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004167 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004169 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170
4171 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004172 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4173
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174 dev->gflags ^= IFF_PROMISC;
4175 dev_set_promiscuity(dev, inc);
4176 }
4177
4178 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 4179	   is important. Some (broken) drivers set IFF_PROMISC when
 4180	   IFF_ALLMULTI is requested, without asking us and without reporting it.
4181 */
4182 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004183 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4184
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 dev->gflags ^= IFF_ALLMULTI;
4186 dev_set_allmulti(dev, inc);
4187 }
4188
Thomas Graf7c355f52007-06-05 16:03:03 -07004189 /* Exclude state transition flags, already notified */
4190 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4191 if (changes)
4192 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193
4194 return ret;
4195}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004196EXPORT_SYMBOL(dev_change_flags);
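/*
 * Illustrative sketch (not part of dev.c): bringing an interface up the
 * same way the SIOCSIFFLAGS path does, by feeding dev_change_flags() the
 * userspace-format flags returned by dev_get_flags().  The caller must
 * hold RTNL, as the ASSERT_RTNL() above enforces; "my_bring_up" is a
 * hypothetical helper.
 */
static int my_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}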
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004198/**
4199 * dev_set_mtu - Change maximum transfer unit
4200 * @dev: device
4201 * @new_mtu: new transfer unit
4202 *
4203 * Change the maximum transfer size of the network device.
4204 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205int dev_set_mtu(struct net_device *dev, int new_mtu)
4206{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004207 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004208 int err;
4209
4210 if (new_mtu == dev->mtu)
4211 return 0;
4212
4213 /* MTU must be positive. */
4214 if (new_mtu < 0)
4215 return -EINVAL;
4216
4217 if (!netif_device_present(dev))
4218 return -ENODEV;
4219
4220 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004221 if (ops->ndo_change_mtu)
4222 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223 else
4224 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004225
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004227 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 return err;
4229}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004230EXPORT_SYMBOL(dev_set_mtu);
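/*
 * Illustrative sketch (not part of dev.c): changing the MTU from kernel
 * code.  Like the SIOCSIFMTU handler, the caller is expected to hold RTNL;
 * "my_dev" is a hypothetical, already-registered device and 1400 is just
 * an example value.
 */
static int my_shrink_mtu(struct net_device *my_dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(my_dev, 1400);	/* e.g. leave room for a tunnel header */
	rtnl_unlock();
	return err;
}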
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004232/**
4233 * dev_set_mac_address - Change Media Access Control Address
4234 * @dev: device
4235 * @sa: new address
4236 *
4237 * Change the hardware (MAC) address of the device
4238 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4240{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004241 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004242 int err;
4243
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004244 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245 return -EOPNOTSUPP;
4246 if (sa->sa_family != dev->type)
4247 return -EINVAL;
4248 if (!netif_device_present(dev))
4249 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004250 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004252 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253 return err;
4254}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004255EXPORT_SYMBOL(dev_set_mac_address);
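/*
 * Illustrative sketch (not part of dev.c): programming a new MAC address
 * via dev_set_mac_address().  The sockaddr must carry the device's address
 * family (dev->type) and addr_len bytes of address, exactly as the
 * SIOCSIFHWADDR handler below builds it.  Assumes an Ethernet-sized
 * address (addr_len <= sizeof(sa.sa_data)) and that the caller holds RTNL;
 * "my_set_mac" is a hypothetical helper.
 */
static int my_set_mac(struct net_device *dev, const u8 *new_addr)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, new_addr, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}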
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256
4257/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07004258 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004260static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004261{
4262 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004263 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264
4265 if (!dev)
4266 return -ENODEV;
4267
4268 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004269 case SIOCGIFFLAGS: /* Get interface flags */
4270 ifr->ifr_flags = (short) dev_get_flags(dev);
4271 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004273 case SIOCGIFMETRIC: /* Get the metric on the interface
4274 (currently unused) */
4275 ifr->ifr_metric = 0;
4276 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004277
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004278 case SIOCGIFMTU: /* Get the MTU of a device */
4279 ifr->ifr_mtu = dev->mtu;
4280 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004281
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004282 case SIOCGIFHWADDR:
4283 if (!dev->addr_len)
4284 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4285 else
4286 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4287 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4288 ifr->ifr_hwaddr.sa_family = dev->type;
4289 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004291 case SIOCGIFSLAVE:
4292 err = -EINVAL;
4293 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004294
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004295 case SIOCGIFMAP:
4296 ifr->ifr_map.mem_start = dev->mem_start;
4297 ifr->ifr_map.mem_end = dev->mem_end;
4298 ifr->ifr_map.base_addr = dev->base_addr;
4299 ifr->ifr_map.irq = dev->irq;
4300 ifr->ifr_map.dma = dev->dma;
4301 ifr->ifr_map.port = dev->if_port;
4302 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004303
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004304 case SIOCGIFINDEX:
4305 ifr->ifr_ifindex = dev->ifindex;
4306 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004307
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004308 case SIOCGIFTXQLEN:
4309 ifr->ifr_qlen = dev->tx_queue_len;
4310 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004311
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004312 default:
4313 /* dev_ioctl() should ensure this case
4314 * is never reached
4315 */
4316 WARN_ON(1);
4317 err = -EINVAL;
4318 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004319
4320 }
4321 return err;
4322}
4323
4324/*
4325 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4326 */
4327static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4328{
4329 int err;
4330 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004331 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004332
4333 if (!dev)
4334 return -ENODEV;
4335
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004336 ops = dev->netdev_ops;
4337
Jeff Garzik14e3e072007-10-08 00:06:32 -07004338 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004339 case SIOCSIFFLAGS: /* Set interface flags */
4340 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004341
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004342 case SIOCSIFMETRIC: /* Set the metric on the interface
4343 (currently unused) */
4344 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004345
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004346 case SIOCSIFMTU: /* Set the MTU of a device */
4347 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004348
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004349 case SIOCSIFHWADDR:
4350 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004351
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004352 case SIOCSIFHWBROADCAST:
4353 if (ifr->ifr_hwaddr.sa_family != dev->type)
4354 return -EINVAL;
4355 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4356 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4357 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4358 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004360 case SIOCSIFMAP:
4361 if (ops->ndo_set_config) {
4362 if (!netif_device_present(dev))
4363 return -ENODEV;
4364 return ops->ndo_set_config(dev, &ifr->ifr_map);
4365 }
4366 return -EOPNOTSUPP;
4367
4368 case SIOCADDMULTI:
4369 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4370 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4371 return -EINVAL;
4372 if (!netif_device_present(dev))
4373 return -ENODEV;
4374 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4375 dev->addr_len, 1);
4376
4377 case SIOCDELMULTI:
4378 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4379 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4380 return -EINVAL;
4381 if (!netif_device_present(dev))
4382 return -ENODEV;
4383 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4384 dev->addr_len, 1);
4385
4386 case SIOCSIFTXQLEN:
4387 if (ifr->ifr_qlen < 0)
4388 return -EINVAL;
4389 dev->tx_queue_len = ifr->ifr_qlen;
4390 return 0;
4391
4392 case SIOCSIFNAME:
4393 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4394 return dev_change_name(dev, ifr->ifr_newname);
4395
4396 /*
4397 * Unknown or private ioctl
4398 */
4399 default:
4400 if ((cmd >= SIOCDEVPRIVATE &&
4401 cmd <= SIOCDEVPRIVATE + 15) ||
4402 cmd == SIOCBONDENSLAVE ||
4403 cmd == SIOCBONDRELEASE ||
4404 cmd == SIOCBONDSETHWADDR ||
4405 cmd == SIOCBONDSLAVEINFOQUERY ||
4406 cmd == SIOCBONDINFOQUERY ||
4407 cmd == SIOCBONDCHANGEACTIVE ||
4408 cmd == SIOCGMIIPHY ||
4409 cmd == SIOCGMIIREG ||
4410 cmd == SIOCSMIIREG ||
4411 cmd == SIOCBRADDIF ||
4412 cmd == SIOCBRDELIF ||
4413 cmd == SIOCSHWTSTAMP ||
4414 cmd == SIOCWANDEV) {
4415 err = -EOPNOTSUPP;
4416 if (ops->ndo_do_ioctl) {
4417 if (netif_device_present(dev))
4418 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4419 else
4420 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004422 } else
4423 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004424
4425 }
4426 return err;
4427}
4428
4429/*
4430 * This function handles all "interface"-type I/O control requests. The actual
4431 * 'doing' part of this is dev_ifsioc above.
4432 */
4433
4434/**
4435 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004436 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437 * @cmd: command to issue
4438 * @arg: pointer to a struct ifreq in user space
4439 *
4440 * Issue ioctl functions to devices. This is normally called by the
4441 * user space syscall interfaces but can sometimes be useful for
4442 * other purposes. The return value is the return from the syscall if
4443 * positive or a negative errno code on error.
4444 */
4445
Eric W. Biederman881d9662007-09-17 11:56:21 -07004446int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004447{
4448 struct ifreq ifr;
4449 int ret;
4450 char *colon;
4451
4452 /* One special case: SIOCGIFCONF takes ifconf argument
4453 and requires shared lock, because it sleeps writing
4454 to user space.
4455 */
4456
4457 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004458 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004459 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004460 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461 return ret;
4462 }
4463 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004464 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465
4466 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4467 return -EFAULT;
4468
4469 ifr.ifr_name[IFNAMSIZ-1] = 0;
4470
4471 colon = strchr(ifr.ifr_name, ':');
4472 if (colon)
4473 *colon = 0;
4474
4475 /*
4476 * See which interface the caller is talking about.
4477 */
4478
4479 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004480 /*
4481 * These ioctl calls:
4482 * - can be done by all.
4483 * - atomic and do not require locking.
4484 * - return a value
4485 */
4486 case SIOCGIFFLAGS:
4487 case SIOCGIFMETRIC:
4488 case SIOCGIFMTU:
4489 case SIOCGIFHWADDR:
4490 case SIOCGIFSLAVE:
4491 case SIOCGIFMAP:
4492 case SIOCGIFINDEX:
4493 case SIOCGIFTXQLEN:
4494 dev_load(net, ifr.ifr_name);
4495 read_lock(&dev_base_lock);
4496 ret = dev_ifsioc_locked(net, &ifr, cmd);
4497 read_unlock(&dev_base_lock);
4498 if (!ret) {
4499 if (colon)
4500 *colon = ':';
4501 if (copy_to_user(arg, &ifr,
4502 sizeof(struct ifreq)))
4503 ret = -EFAULT;
4504 }
4505 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004507 case SIOCETHTOOL:
4508 dev_load(net, ifr.ifr_name);
4509 rtnl_lock();
4510 ret = dev_ethtool(net, &ifr);
4511 rtnl_unlock();
4512 if (!ret) {
4513 if (colon)
4514 *colon = ':';
4515 if (copy_to_user(arg, &ifr,
4516 sizeof(struct ifreq)))
4517 ret = -EFAULT;
4518 }
4519 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004521 /*
4522 * These ioctl calls:
4523 * - require superuser power.
4524 * - require strict serialization.
4525 * - return a value
4526 */
4527 case SIOCGMIIPHY:
4528 case SIOCGMIIREG:
4529 case SIOCSIFNAME:
4530 if (!capable(CAP_NET_ADMIN))
4531 return -EPERM;
4532 dev_load(net, ifr.ifr_name);
4533 rtnl_lock();
4534 ret = dev_ifsioc(net, &ifr, cmd);
4535 rtnl_unlock();
4536 if (!ret) {
4537 if (colon)
4538 *colon = ':';
4539 if (copy_to_user(arg, &ifr,
4540 sizeof(struct ifreq)))
4541 ret = -EFAULT;
4542 }
4543 return ret;
4544
4545 /*
4546 * These ioctl calls:
4547 * - require superuser power.
4548 * - require strict serialization.
4549 * - do not return a value
4550 */
4551 case SIOCSIFFLAGS:
4552 case SIOCSIFMETRIC:
4553 case SIOCSIFMTU:
4554 case SIOCSIFMAP:
4555 case SIOCSIFHWADDR:
4556 case SIOCSIFSLAVE:
4557 case SIOCADDMULTI:
4558 case SIOCDELMULTI:
4559 case SIOCSIFHWBROADCAST:
4560 case SIOCSIFTXQLEN:
4561 case SIOCSMIIREG:
4562 case SIOCBONDENSLAVE:
4563 case SIOCBONDRELEASE:
4564 case SIOCBONDSETHWADDR:
4565 case SIOCBONDCHANGEACTIVE:
4566 case SIOCBRADDIF:
4567 case SIOCBRDELIF:
4568 case SIOCSHWTSTAMP:
4569 if (!capable(CAP_NET_ADMIN))
4570 return -EPERM;
4571 /* fall through */
4572 case SIOCBONDSLAVEINFOQUERY:
4573 case SIOCBONDINFOQUERY:
4574 dev_load(net, ifr.ifr_name);
4575 rtnl_lock();
4576 ret = dev_ifsioc(net, &ifr, cmd);
4577 rtnl_unlock();
4578 return ret;
4579
4580 case SIOCGIFMEM:
4581 /* Get the per device memory space. We can add this but
4582 * currently do not support it */
4583 case SIOCSIFMEM:
4584 /* Set the per device memory buffer space.
4585 * Not applicable in our case */
4586 case SIOCSIFLINK:
4587 return -EINVAL;
4588
4589 /*
4590 * Unknown or private ioctl.
4591 */
4592 default:
4593 if (cmd == SIOCWANDEV ||
4594 (cmd >= SIOCDEVPRIVATE &&
4595 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004596 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004598 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004599 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004600 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004602 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004604 }
4605 /* Take care of Wireless Extensions */
4606 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4607 return wext_handle_ioctl(net, &ifr, cmd, arg);
4608 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609 }
4610}
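/*
 * Illustrative sketch (not part of dev.c): the userspace side of the
 * SIOCGIFMTU path that dev_ioctl()/dev_ifsioc_locked() above service.
 * Any datagram socket works as the ioctl transport; "eth0" is just an
 * example interface name.
 */
#if 0	/* userspace program, shown here only for context */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
	return 0;
}
#endif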
4611
4612
4613/**
4614 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004615 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004616 *
4617 * Returns a suitable unique value for a new device interface
4618 * number. The caller must hold the rtnl semaphore or the
4619 * dev_base_lock to be sure it remains unique.
4620 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004621static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004622{
4623 static int ifindex;
4624 for (;;) {
4625 if (++ifindex <= 0)
4626 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004627 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004628 return ifindex;
4629 }
4630}
4631
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004633static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004634
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004635static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638}
4639
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004640static void rollback_registered_many(struct list_head *head)
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004641{
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004642 struct net_device *dev;
4643
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004644 BUG_ON(dev_boot_phase);
4645 ASSERT_RTNL();
4646
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004647 list_for_each_entry(dev, head, unreg_list) {
4648 /* Some devices call without registering
4649 * for initialization unwind.
4650 */
4651 if (dev->reg_state == NETREG_UNINITIALIZED) {
4652 pr_debug("unregister_netdevice: device %s/%p never "
4653 "was registered\n", dev->name, dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004654
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004655 WARN_ON(1);
4656 return;
4657 }
4658
4659 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4660
4661 /* If device is running, close it first. */
4662 dev_close(dev);
4663
4664 /* And unlink it from device chain. */
4665 unlist_netdevice(dev);
4666
4667 dev->reg_state = NETREG_UNREGISTERING;
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004668 }
4669
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004670 synchronize_net();
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004671
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004672 list_for_each_entry(dev, head, unreg_list) {
4673 /* Shutdown queueing discipline. */
4674 dev_shutdown(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004675
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004676
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004677 /* Notify protocols, that we are about to destroy
4678 this device. They should clean all the things.
4679 */
4680 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4681
4682 /*
4683 * Flush the unicast and multicast chains
4684 */
4685 dev_unicast_flush(dev);
4686 dev_addr_discard(dev);
4687
4688 if (dev->netdev_ops->ndo_uninit)
4689 dev->netdev_ops->ndo_uninit(dev);
4690
4691 /* Notifier chain MUST detach us from master device. */
4692 WARN_ON(dev->master);
4693
4694 /* Remove entries from kobject tree */
4695 netdev_unregister_kobject(dev);
4696 }
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004697
4698 synchronize_net();
4699
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004700 list_for_each_entry(dev, head, unreg_list)
4701 dev_put(dev);
4702}
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004703
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004704static void rollback_registered(struct net_device *dev)
4705{
4706 LIST_HEAD(single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004707
Eric Dumazet9b5e3832009-10-27 07:04:19 +00004708 list_add(&dev->unreg_list, &single);
4709 rollback_registered_many(&single);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004710}
4711
David S. Millere8a04642008-07-17 00:34:19 -07004712static void __netdev_init_queue_locks_one(struct net_device *dev,
4713 struct netdev_queue *dev_queue,
4714 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004715{
4716 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004717 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004718 dev_queue->xmit_lock_owner = -1;
4719}
4720
4721static void netdev_init_queue_locks(struct net_device *dev)
4722{
David S. Millere8a04642008-07-17 00:34:19 -07004723 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4724 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004725}
4726
Herbert Xub63365a2008-10-23 01:11:29 -07004727unsigned long netdev_fix_features(unsigned long features, const char *name)
4728{
4729 /* Fix illegal SG+CSUM combinations. */
4730 if ((features & NETIF_F_SG) &&
4731 !(features & NETIF_F_ALL_CSUM)) {
4732 if (name)
4733 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4734 "checksum feature.\n", name);
4735 features &= ~NETIF_F_SG;
4736 }
4737
4738 /* TSO requires that SG is present as well. */
4739 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4740 if (name)
4741 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4742 "SG feature.\n", name);
4743 features &= ~NETIF_F_TSO;
4744 }
4745
4746 if (features & NETIF_F_UFO) {
4747 if (!(features & NETIF_F_GEN_CSUM)) {
4748 if (name)
4749 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4750 "since no NETIF_F_HW_CSUM feature.\n",
4751 name);
4752 features &= ~NETIF_F_UFO;
4753 }
4754
4755 if (!(features & NETIF_F_SG)) {
4756 if (name)
4757 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4758 "since no NETIF_F_SG feature.\n", name);
4759 features &= ~NETIF_F_UFO;
4760 }
4761 }
4762
4763 return features;
4764}
4765EXPORT_SYMBOL(netdev_fix_features);
4766
Linus Torvalds1da177e2005-04-16 15:20:36 -07004767/**
4768 * register_netdevice - register a network device
4769 * @dev: device to register
4770 *
4771 * Take a completed network device structure and add it to the kernel
4772 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4773 * chain. 0 is returned on success. A negative errno code is returned
4774 * on a failure to set up the device, or if the name is a duplicate.
4775 *
4776 * Callers must hold the rtnl semaphore. You may want
4777 * register_netdev() instead of this.
4778 *
4779 * BUGS:
4780 * The locking appears insufficient to guarantee two parallel registers
4781 * will not get the same name.
4782 */
4783
4784int register_netdevice(struct net_device *dev)
4785{
4786 struct hlist_head *head;
4787 struct hlist_node *p;
4788 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004789 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004790
4791 BUG_ON(dev_boot_phase);
4792 ASSERT_RTNL();
4793
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004794 might_sleep();
4795
Linus Torvalds1da177e2005-04-16 15:20:36 -07004796 /* When net_device's are persistent, this will be fatal. */
4797 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004798 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004799
David S. Millerf1f28aa2008-07-15 00:08:33 -07004800 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004801 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004802 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004803
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804 dev->iflink = -1;
4805
4806 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004807 if (dev->netdev_ops->ndo_init) {
4808 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004809 if (ret) {
4810 if (ret > 0)
4811 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004812 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004813 }
4814 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004815
Linus Torvalds1da177e2005-04-16 15:20:36 -07004816 if (!dev_valid_name(dev->name)) {
4817 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004818 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004819 }
4820
Eric W. Biederman881d9662007-09-17 11:56:21 -07004821 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004822 if (dev->iflink == -1)
4823 dev->iflink = dev->ifindex;
4824
4825 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004826 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004827 hlist_for_each(p, head) {
4828 struct net_device *d
4829 = hlist_entry(p, struct net_device, name_hlist);
4830 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4831 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004832 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004833 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004834 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004835
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004836 /* Fix illegal checksum combinations */
4837 if ((dev->features & NETIF_F_HW_CSUM) &&
4838 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4839 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4840 dev->name);
4841 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4842 }
4843
4844 if ((dev->features & NETIF_F_NO_CSUM) &&
4845 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4846 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4847 dev->name);
4848 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4849 }
4850
Herbert Xub63365a2008-10-23 01:11:29 -07004851 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004852
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004853 /* Enable software GSO if SG is supported. */
4854 if (dev->features & NETIF_F_SG)
4855 dev->features |= NETIF_F_GSO;
4856
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004857 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00004858
4859 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4860 ret = notifier_to_errno(ret);
4861 if (ret)
4862 goto err_uninit;
4863
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004864 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004865 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004866 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004867 dev->reg_state = NETREG_REGISTERED;
4868
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869 /*
4870 * Default initial state at registry is that the
4871 * device is present.
4872 */
4873
4874 set_bit(__LINK_STATE_PRESENT, &dev->state);
4875
Linus Torvalds1da177e2005-04-16 15:20:36 -07004876 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004877 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004878 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004879
4880 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004881 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004882 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004883 if (ret) {
4884 rollback_registered(dev);
4885 dev->reg_state = NETREG_UNREGISTERED;
4886 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004887
4888out:
4889 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004890
4891err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004892 if (dev->netdev_ops->ndo_uninit)
4893 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004894 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004896EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004897
4898/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004899 * init_dummy_netdev - init a dummy network device for NAPI
4900 * @dev: device to init
4901 *
 4902 * This takes a network device structure and initializes the minimum
 4903 * number of fields so it can be used to schedule NAPI polls without
4904 * registering a full blown interface. This is to be used by drivers
4905 * that need to tie several hardware interfaces to a single NAPI
4906 * poll scheduler due to HW limitations.
4907 */
4908int init_dummy_netdev(struct net_device *dev)
4909{
4910 /* Clear everything. Note we don't initialize spinlocks
 4911	 * as they aren't supposed to be taken by any of the
4912 * NAPI code and this dummy netdev is supposed to be
4913 * only ever used for NAPI polls
4914 */
4915 memset(dev, 0, sizeof(struct net_device));
4916
4917 /* make sure we BUG if trying to hit standard
4918 * register/unregister code path
4919 */
4920 dev->reg_state = NETREG_DUMMY;
4921
4922 /* initialize the ref count */
4923 atomic_set(&dev->refcnt, 1);
4924
4925 /* NAPI wants this */
4926 INIT_LIST_HEAD(&dev->napi_list);
4927
4928 /* a dummy interface is started by default */
4929 set_bit(__LINK_STATE_PRESENT, &dev->state);
4930 set_bit(__LINK_STATE_START, &dev->state);
4931
4932 return 0;
4933}
4934EXPORT_SYMBOL_GPL(init_dummy_netdev);
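/*
 * Illustrative sketch (not part of dev.c): the intended use of
 * init_dummy_netdev() -- backing several hardware channels with one NAPI
 * context when there is no real net_device to hang it off.  The adapter
 * structure, poll routine and weight of 64 are hypothetical.
 */
struct my_adapter {				/* hypothetical driver private data */
	struct net_device dummy_dev;		/* never registered, NAPI anchor only */
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget);	/* assumed defined elsewhere */

static void my_adapter_init_napi(struct my_adapter *ad)
{
	init_dummy_netdev(&ad->dummy_dev);
	netif_napi_add(&ad->dummy_dev, &ad->napi, my_poll, 64);
	napi_enable(&ad->napi);
	/* the interrupt handler would then call napi_schedule(&ad->napi) */
}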
4935
4936
4937/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938 * register_netdev - register a network device
4939 * @dev: device to register
4940 *
4941 * Take a completed network device structure and add it to the kernel
4942 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4943 * chain. 0 is returned on success. A negative errno code is returned
4944 * on a failure to set up the device, or if the name is a duplicate.
4945 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004946 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004947 * and expands the device name if you passed a format string to
4948 * alloc_netdev.
4949 */
4950int register_netdev(struct net_device *dev)
4951{
4952 int err;
4953
4954 rtnl_lock();
4955
4956 /*
4957 * If the name is a format string the caller wants us to do a
4958 * name allocation.
4959 */
4960 if (strchr(dev->name, '%')) {
4961 err = dev_alloc_name(dev, dev->name);
4962 if (err < 0)
4963 goto out;
4964 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004965
Linus Torvalds1da177e2005-04-16 15:20:36 -07004966 err = register_netdevice(dev);
4967out:
4968 rtnl_unlock();
4969 return err;
4970}
4971EXPORT_SYMBOL(register_netdev);
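/*
 * Illustrative sketch (not part of dev.c): the usual driver-side pairing of
 * alloc_netdev()/register_netdev().  The "%d" in the name lets
 * register_netdev() pick a free unit number via dev_alloc_name(), as the
 * comment above describes.  "struct my_priv", "my_netdev_ops" and
 * "my_setup" are hypothetical driver symbols.
 */
struct my_priv { int placeholder; };			/* hypothetical private data */
static const struct net_device_ops my_netdev_ops;	/* assumed populated elsewhere */

static void my_setup(struct net_device *dev)
{
	ether_setup(dev);			/* sane Ethernet defaults */
	dev->netdev_ops = &my_netdev_ops;
}

static struct net_device *my_create_device(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct my_priv), "mydev%d", my_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {		/* takes RTNL internally */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}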
4972
4973/*
4974 * netdev_wait_allrefs - wait until all references are gone.
4975 *
4976 * This is called when unregistering network devices.
4977 *
4978 * Any protocol or device that holds a reference should register
4979 * for netdevice notification, and cleanup and put back the
4980 * reference if they receive an UNREGISTER event.
4981 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004982 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004983 */
4984static void netdev_wait_allrefs(struct net_device *dev)
4985{
4986 unsigned long rebroadcast_time, warning_time;
4987
4988 rebroadcast_time = warning_time = jiffies;
4989 while (atomic_read(&dev->refcnt) != 0) {
4990 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004991 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992
4993 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004994 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004995
4996 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4997 &dev->state)) {
4998 /* We must not have linkwatch events
4999 * pending on unregister. If this
5000 * happens, we simply run the queue
5001 * unscheduled, resulting in a noop
5002 * for this device.
5003 */
5004 linkwatch_run_queue();
5005 }
5006
Stephen Hemminger6756ae42006-03-20 22:23:58 -08005007 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008
5009 rebroadcast_time = jiffies;
5010 }
5011
5012 msleep(250);
5013
5014 if (time_after(jiffies, warning_time + 10 * HZ)) {
5015 printk(KERN_EMERG "unregister_netdevice: "
5016 "waiting for %s to become free. Usage "
5017 "count = %d\n",
5018 dev->name, atomic_read(&dev->refcnt));
5019 warning_time = jiffies;
5020 }
5021 }
5022}
5023
5024/* The sequence is:
5025 *
5026 * rtnl_lock();
5027 * ...
5028 * register_netdevice(x1);
5029 * register_netdevice(x2);
5030 * ...
5031 * unregister_netdevice(y1);
5032 * unregister_netdevice(y2);
5033 * ...
5034 * rtnl_unlock();
5035 * free_netdev(y1);
5036 * free_netdev(y2);
5037 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005038 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005039 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005040 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005041 * without deadlocking with linkwatch via keventd.
5042 * 2) Since we run with the RTNL semaphore not held, we can sleep
5043 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005044 *
5045 * We must not return until all unregister events added during
5046 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005048void netdev_run_todo(void)
5049{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005050 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005051
Linus Torvalds1da177e2005-04-16 15:20:36 -07005052 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005053 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005054
5055 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005056
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057 while (!list_empty(&list)) {
5058 struct net_device *dev
5059 = list_entry(list.next, struct net_device, todo_list);
5060 list_del(&dev->todo_list);
5061
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005062 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063 printk(KERN_ERR "network todo '%s' but state %d\n",
5064 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005065 dump_stack();
5066 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005068
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005069 dev->reg_state = NETREG_UNREGISTERED;
5070
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005071 on_each_cpu(flush_backlog, dev, 1);
5072
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005073 netdev_wait_allrefs(dev);
5074
5075 /* paranoia */
5076 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005077 WARN_ON(dev->ip_ptr);
5078 WARN_ON(dev->ip6_ptr);
5079 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005080
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005081 if (dev->destructor)
5082 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005083
5084 /* Free network device */
5085 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005086 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005087}
5088
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005089/**
5090 * dev_get_stats - get network device statistics
5091 * @dev: device to get statistics from
5092 *
5093 * Get network statistics from device. The device driver may provide
5094 * its own method by setting dev->netdev_ops->get_stats; otherwise
5095 * the internal statistics structure is used.
5096 */
5097const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005098{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005099 const struct net_device_ops *ops = dev->netdev_ops;
5100
5101 if (ops->ndo_get_stats)
5102 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005103 else {
5104 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5105 struct net_device_stats *stats = &dev->stats;
5106 unsigned int i;
5107 struct netdev_queue *txq;
5108
5109 for (i = 0; i < dev->num_tx_queues; i++) {
5110 txq = netdev_get_tx_queue(dev, i);
5111 tx_bytes += txq->tx_bytes;
5112 tx_packets += txq->tx_packets;
5113 tx_dropped += txq->tx_dropped;
5114 }
5115 if (tx_bytes || tx_packets || tx_dropped) {
5116 stats->tx_bytes = tx_bytes;
5117 stats->tx_packets = tx_packets;
5118 stats->tx_dropped = tx_dropped;
5119 }
5120 return stats;
5121 }
Rusty Russellc45d2862007-03-28 14:29:08 -07005122}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005123EXPORT_SYMBOL(dev_get_stats);
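/*
 * Illustrative sketch (not part of dev.c): a reader of dev_get_stats(),
 * along the lines of what the /proc/net/dev and rtnetlink code does.  The
 * printk is only for demonstration.
 */
static void my_dump_rx_counters(struct net_device *dev)	/* hypothetical */
{
	const struct net_device_stats *stats = dev_get_stats(dev);

	printk(KERN_DEBUG "%s: rx %lu packets, %lu dropped\n",
	       dev->name, stats->rx_packets, stats->rx_dropped);
}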
Rusty Russellc45d2862007-03-28 14:29:08 -07005124
David S. Millerdc2b4842008-07-08 17:18:23 -07005125static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005126 struct netdev_queue *queue,
5127 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005128{
David S. Millerdc2b4842008-07-08 17:18:23 -07005129 queue->dev = dev;
5130}
5131
David S. Millerbb949fb2008-07-08 16:55:56 -07005132static void netdev_init_queues(struct net_device *dev)
5133{
David S. Millere8a04642008-07-17 00:34:19 -07005134 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5135 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005136 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005137}
5138
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005140 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005141 * @sizeof_priv: size of private data to allocate space for
5142 * @name: device name format string
5143 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005144 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145 *
5146 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005147 * and performs basic initialization. Also allocates subquue structs
5148 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005149 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005150struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5151 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005152{
David S. Millere8a04642008-07-17 00:34:19 -07005153 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005155 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005156 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005158 BUG_ON(strlen(name) >= sizeof(dev->name));
5159
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005160 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005161 if (sizeof_priv) {
5162 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005163 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005164 alloc_size += sizeof_priv;
5165 }
5166 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005167 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005169 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005170 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005171 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172 return NULL;
5173 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174
Stephen Hemminger79439862008-07-21 13:28:44 -07005175 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005176 if (!tx) {
5177 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5178 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005179 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005180 }
5181
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005182 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005184
5185 if (dev_addr_init(dev))
5186 goto free_tx;
5187
Jiri Pirkoccffad252009-05-22 23:22:17 +00005188 dev_unicast_init(dev);
5189
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005190 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191
David S. Millere8a04642008-07-17 00:34:19 -07005192 dev->_tx = tx;
5193 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005194 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005195
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005196 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005197
David S. Millerbb949fb2008-07-08 16:55:56 -07005198 netdev_init_queues(dev);
5199
Herbert Xud565b0a2008-12-15 23:38:52 -08005200 INIT_LIST_HEAD(&dev->napi_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005201 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202 setup(dev);
5203 strcpy(dev->name, name);
5204 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005205
5206free_tx:
5207 kfree(tx);
5208
5209free_p:
5210 kfree(p);
5211 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005213EXPORT_SYMBOL(alloc_netdev_mq);
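/*
 * Illustrative sketch (not part of dev.c): allocating a multiqueue
 * Ethernet device with alloc_netdev_mq().  The eight TX queues and the
 * "my_mq_priv" structure are hypothetical; ether_setup() supplies the
 * usual Ethernet defaults.
 */
struct my_mq_priv { int placeholder; };			/* hypothetical */

static struct net_device *my_alloc_mq_device(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct my_mq_priv), "eth%d",
			      ether_setup, 8);
	if (dev)
		/* a driver may expose fewer queues than it allocated */
		dev->real_num_tx_queues = 4;
	return dev;
}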
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214
5215/**
5216 * free_netdev - free network device
5217 * @dev: device
5218 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005219 * This function does the last stage of destroying an allocated device
5220 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005221 * If this is the last reference then it will be freed.
5222 */
5223void free_netdev(struct net_device *dev)
5224{
Herbert Xud565b0a2008-12-15 23:38:52 -08005225 struct napi_struct *p, *n;
5226
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005227 release_net(dev_net(dev));
5228
David S. Millere8a04642008-07-17 00:34:19 -07005229 kfree(dev->_tx);
5230
Jiri Pirkof001fde2009-05-05 02:48:28 +00005231 /* Flush device addresses */
5232 dev_addr_flush(dev);
5233
Herbert Xud565b0a2008-12-15 23:38:52 -08005234 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5235 netif_napi_del(p);
5236
Stephen Hemminger3041a062006-05-26 13:25:24 -07005237 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005238 if (dev->reg_state == NETREG_UNINITIALIZED) {
5239 kfree((char *)dev - dev->padded);
5240 return;
5241 }
5242
5243 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5244 dev->reg_state = NETREG_RELEASED;
5245
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005246 /* will free via device release */
5247 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005248}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005249EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005250
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005251/**
5252 * synchronize_net - Synchronize with packet receive processing
5253 *
5254 * Wait for packets currently being received to be done.
5255 * Does not block later packets from starting.
5256 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005257void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258{
5259 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005260 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005262EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263
5264/**
Eric Dumazet44a08732009-10-27 07:03:04 +00005265 * unregister_netdevice_queue - remove device from the kernel
Linus Torvalds1da177e2005-04-16 15:20:36 -07005266 * @dev: device
Eric Dumazet44a08732009-10-27 07:03:04 +00005267 * @head: list
 5268 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07005269 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005270 * from the kernel tables.
Eric Dumazet44a08732009-10-27 07:03:04 +00005271 * If head not NULL, device is queued to be unregistered later.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005272 *
5273 * Callers must hold the rtnl semaphore. You may want
5274 * unregister_netdev() instead of this.
5275 */
5276
Eric Dumazet44a08732009-10-27 07:03:04 +00005277void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005278{
Herbert Xua6620712007-12-12 19:21:56 -08005279 ASSERT_RTNL();
5280
Eric Dumazet44a08732009-10-27 07:03:04 +00005281 if (head) {
5282 list_add_tail(&dev->unreg_list, head);
5283 } else {
5284 rollback_registered(dev);
5285 /* Finish processing unregister after unlock */
5286 net_set_todo(dev);
5287 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005288}
Eric Dumazet44a08732009-10-27 07:03:04 +00005289EXPORT_SYMBOL(unregister_netdevice_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005290
5291/**
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005292 * unregister_netdevice_many - unregister many devices
5293 * @head: list of devices
5294 *
5295 */
5296void unregister_netdevice_many(struct list_head *head)
5297{
5298 struct net_device *dev;
5299
5300 if (!list_empty(head)) {
5301 rollback_registered_many(head);
5302 list_for_each_entry(dev, head, unreg_list)
5303 net_set_todo(dev);
5304 }
5305}
Eric Dumazet63c80992009-10-27 07:06:49 +00005306EXPORT_SYMBOL(unregister_netdevice_many);
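/*
 * Illustrative sketch (not part of dev.c): batching several unregisters so
 * that the synchronize_net() calls in rollback_registered_many() are paid
 * once for the whole group.  The "my_devs" array is hypothetical.
 */
static void my_unregister_group(struct net_device *my_devs[], int count)
{
	LIST_HEAD(unreg_list);
	int i;

	rtnl_lock();
	for (i = 0; i < count; i++)
		unregister_netdevice_queue(my_devs[i], &unreg_list);
	unregister_netdevice_many(&unreg_list);
	rtnl_unlock();
}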
Eric Dumazet9b5e3832009-10-27 07:04:19 +00005307
5308/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005309 * unregister_netdev - remove device from the kernel
5310 * @dev: device
5311 *
5312 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005313 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005314 *
5315 * This is just a wrapper for unregister_netdevice that takes
5316 * the rtnl semaphore. In general you want to use this and not
5317 * unregister_netdevice.
5318 */
5319void unregister_netdev(struct net_device *dev)
5320{
5321 rtnl_lock();
5322 unregister_netdevice(dev);
5323 rtnl_unlock();
5324}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005325EXPORT_SYMBOL(unregister_netdev);
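/*
 * Illustrative sketch (not part of dev.c): the usual driver teardown
 * order.  unregister_netdev() takes RTNL and, on the way out, waits for
 * the refcount to drop (netdev_wait_allrefs() above); only then is it safe
 * to call free_netdev().  "my_dev" is hypothetical.
 */
static void my_destroy_device(struct net_device *my_dev)
{
	unregister_netdev(my_dev);	/* detaches from the stack, may sleep */
	free_netdev(my_dev);		/* releases the structure itself */
}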
5326
Eric W. Biedermance286d32007-09-12 13:53:49 +02005327/**
5328 * dev_change_net_namespace - move device to different nethost namespace
 5329 * dev_change_net_namespace - move device to a different network namespace
5330 * @net: network namespace
5331 * @pat: If not NULL name pattern to try if the current device name
5332 * is already taken in the destination network namespace.
5333 *
5334 * This function shuts down a device interface and moves it
5335 * to a new network namespace. On success 0 is returned, on
 5336 * a failure a negative errno code is returned.
5337 *
5338 * Callers must hold the rtnl semaphore.
5339 */
5340
5341int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5342{
5343 char buf[IFNAMSIZ];
5344 const char *destname;
5345 int err;
5346
5347 ASSERT_RTNL();
5348
5349 /* Don't allow namespace local devices to be moved. */
5350 err = -EINVAL;
5351 if (dev->features & NETIF_F_NETNS_LOCAL)
5352 goto out;
5353
Eric W. Biederman38918452008-10-27 17:51:47 -07005354#ifdef CONFIG_SYSFS
5355 /* Don't allow real devices to be moved when sysfs
5356 * is enabled.
5357 */
5358 err = -EINVAL;
5359 if (dev->dev.parent)
5360 goto out;
5361#endif
5362
Eric W. Biedermance286d32007-09-12 13:53:49 +02005363	/* Ensure the device has been registered */
5364 err = -EINVAL;
5365 if (dev->reg_state != NETREG_REGISTERED)
5366 goto out;
5367
 5368	/* Get out if there is nothing to do */
5369 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005370 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005371 goto out;
5372
5373 /* Pick the destination device name, and ensure
5374 * we can use it in the destination network namespace.
5375 */
5376 err = -EEXIST;
5377 destname = dev->name;
5378 if (__dev_get_by_name(net, destname)) {
5379 /* We get here if we can't use the current device name */
5380 if (!pat)
5381 goto out;
5382 if (!dev_valid_name(pat))
5383 goto out;
5384 if (strchr(pat, '%')) {
5385 if (__dev_alloc_name(net, pat, buf) < 0)
5386 goto out;
5387 destname = buf;
5388 } else
5389 destname = pat;
5390 if (__dev_get_by_name(net, destname))
5391 goto out;
5392 }
5393
5394 /*
5395 * And now a mini version of register_netdevice unregister_netdevice.
5396 */
5397
5398 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005399 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005400
5401 /* And unlink it from device chain */
5402 err = -ENODEV;
5403 unlist_netdevice(dev);
5404
5405 synchronize_net();
5406
5407 /* Shutdown queueing discipline. */
5408 dev_shutdown(dev);
5409
5410 /* Notify protocols, that we are about to destroy
5411 this device. They should clean all the things.
5412 */
5413 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5414
5415 /*
5416 * Flush the unicast and multicast chains
5417 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00005418 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005419 dev_addr_discard(dev);
5420
Eric W. Biederman38918452008-10-27 17:51:47 -07005421 netdev_unregister_kobject(dev);
5422
Eric W. Biedermance286d32007-09-12 13:53:49 +02005423 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005424 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005425
5426 /* Assign the new device name */
5427 if (destname != dev->name)
5428 strcpy(dev->name, destname);
5429
5430 /* If there is an ifindex conflict assign a new one */
5431 if (__dev_get_by_index(net, dev->ifindex)) {
5432 int iflink = (dev->iflink == dev->ifindex);
5433 dev->ifindex = dev_new_index(net);
5434 if (iflink)
5435 dev->iflink = dev->ifindex;
5436 }
5437
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005438 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005439 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005440 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005441
5442 /* Add the device back in the hashes */
5443 list_netdevice(dev);
5444
5445 /* Notify protocols, that a new device appeared. */
5446 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5447
5448 synchronize_net();
5449 err = 0;
5450out:
5451 return err;
5452}
Johannes Berg463d0182009-07-14 00:33:35 +02005453EXPORT_SYMBOL_GPL(dev_change_net_namespace);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005454
Linus Torvalds1da177e2005-04-16 15:20:36 -07005455static int dev_cpu_callback(struct notifier_block *nfb,
5456 unsigned long action,
5457 void *ocpu)
5458{
5459 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005460 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005461 struct sk_buff *skb;
5462 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5463 struct softnet_data *sd, *oldsd;
5464
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005465 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005466 return NOTIFY_OK;
5467
5468 local_irq_disable();
5469 cpu = smp_processor_id();
5470 sd = &per_cpu(softnet_data, cpu);
5471 oldsd = &per_cpu(softnet_data, oldcpu);
5472
5473 /* Find end of our completion_queue. */
5474 list_skb = &sd->completion_queue;
5475 while (*list_skb)
5476 list_skb = &(*list_skb)->next;
5477 /* Append completion queue from offline CPU. */
5478 *list_skb = oldsd->completion_queue;
5479 oldsd->completion_queue = NULL;
5480
5481 /* Find end of our output_queue. */
5482 list_net = &sd->output_queue;
5483 while (*list_net)
5484 list_net = &(*list_net)->next_sched;
5485 /* Append output queue from offline CPU. */
5486 *list_net = oldsd->output_queue;
5487 oldsd->output_queue = NULL;
5488
5489 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5490 local_irq_enable();
5491
5492 /* Process offline CPU's input_pkt_queue */
5493 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5494 netif_rx(skb);
5495
5496 return NOTIFY_OK;
5497}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005498
5499
Herbert Xu7f353bf2007-08-10 15:47:58 -07005500/**
Herbert Xub63365a2008-10-23 01:11:29 -07005501 * netdev_increment_features - increment feature set by one
5502 * @all: current feature set
5503 * @one: new feature set
5504 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005505 *
5506 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005507 * @one to the master device with current feature set @all. Will not
5508 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005509 */
Herbert Xub63365a2008-10-23 01:11:29 -07005510unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5511 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005512{
Herbert Xub63365a2008-10-23 01:11:29 -07005513 /* If device needs checksumming, downgrade to it. */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005514 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
Herbert Xub63365a2008-10-23 01:11:29 -07005515 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5516 else if (mask & NETIF_F_ALL_CSUM) {
5517 /* If one device supports v4/v6 checksumming, set for all. */
5518 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5519 !(all & NETIF_F_GEN_CSUM)) {
5520 all &= ~NETIF_F_ALL_CSUM;
5521 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5522 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005523
Herbert Xub63365a2008-10-23 01:11:29 -07005524 /* If one device supports hw checksumming, set for all. */
5525 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5526 all &= ~NETIF_F_ALL_CSUM;
5527 all |= NETIF_F_HW_CSUM;
5528 }
5529 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005530
Herbert Xub63365a2008-10-23 01:11:29 -07005531 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005532
Herbert Xub63365a2008-10-23 01:11:29 -07005533 one |= all & NETIF_F_ONE_FOR_ALL;
Sridhar Samudralad9f59502009-10-07 12:24:25 +00005534 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
Herbert Xub63365a2008-10-23 01:11:29 -07005535 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005536
5537 return all;
5538}
Herbert Xub63365a2008-10-23 01:11:29 -07005539EXPORT_SYMBOL(netdev_increment_features);
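/*
 * Illustrative sketch only (not a caller in this file): an aggregating
 * "master" device could fold a newly enslaved device's features into its
 * own like this, where "master", "new_slave" and "mask" are hypothetical
 * caller-side names:
 *
 *	master->features = netdev_increment_features(master->features,
 *						     new_slave->features,
 *						     mask);
 *
 * As documented above, the result will not enable anything that is off
 * in "mask", so the call can safely be repeated once per slave.
 */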
Herbert Xu7f353bf2007-08-10 15:47:58 -07005540
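/*
 * Allocate a hash table of NETDEV_HASHENTRIES empty hlist heads, used by
 * netdev_init() below for the per-namespace name and ifindex lookups.
 */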
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005541static struct hlist_head *netdev_create_hash(void)
5542{
5543 int i;
5544 struct hlist_head *hash;
5545
5546 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5547 if (hash != NULL)
5548 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5549 INIT_HLIST_HEAD(&hash[i]);
5550
5551 return hash;
5552}
5553
Eric W. Biederman881d9662007-09-17 11:56:21 -07005554/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005555static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005556{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005557 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005558
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005559 net->dev_name_head = netdev_create_hash();
5560 if (net->dev_name_head == NULL)
5561 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005562
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005563 net->dev_index_head = netdev_create_hash();
5564 if (net->dev_index_head == NULL)
5565 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005566
5567 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005568
5569err_idx:
5570 kfree(net->dev_name_head);
5571err_name:
5572 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005573}
5574
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005575/**
5576 * netdev_drivername - network driver for the device
5577 * @dev: network device
5578 * @buffer: buffer for resulting name
5579 * @len: size of buffer
5580 *
5581 * Determine network driver for device.
5582 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005583char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005584{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005585 const struct device_driver *driver;
5586 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005587
5588 if (len <= 0 || !buffer)
5589 return buffer;
5590 buffer[0] = 0;
5591
5592 parent = dev->dev.parent;
5593
5594 if (!parent)
5595 return buffer;
5596
5597 driver = parent->driver;
5598 if (driver && driver->name)
5599 strlcpy(buffer, driver->name, len);
5600 return buffer;
5601}
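/*
 * Illustrative caller sketch (hypothetical names, not taken from this
 * file); the helper always returns @buffer, so it can be used directly in
 * a printk argument list:
 *
 *	char drivername[64];
 *
 *	printk(KERN_WARNING "%s (%s): transmit timed out\n", dev->name,
 *	       netdev_drivername(dev, drivername, sizeof(drivername)));
 */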
5602
Pavel Emelyanov46650792007-10-08 20:38:39 -07005603static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005604{
5605 kfree(net->dev_name_head);
5606 kfree(net->dev_index_head);
5607}
5608
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005609static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005610 .init = netdev_init,
5611 .exit = netdev_exit,
5612};
5613
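/*
 * Pernet exit hook: when a network namespace is dismantled, delete virtual
 * devices that provide ->dellink() and move every remaining device that is
 * not NETIF_F_NETNS_LOCAL back to init_net under a fallback "dev%d" name.
 */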
Pavel Emelyanov46650792007-10-08 20:38:39 -07005614static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005615{
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005616 struct net_device *dev;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005617 /*
5618	 * Push all migratable network devices back to the
5619 * initial network namespace
5620 */
5621 rtnl_lock();
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005622restart:
5623 for_each_netdev(net, dev) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005624 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005625 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005626
5627		/* Ignore unmovable devices (e.g. loopback) */
5628 if (dev->features & NETIF_F_NETNS_LOCAL)
5629 continue;
5630
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005631 /* Delete virtual devices */
5632 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
Eric Dumazet23289a32009-10-27 07:06:36 +00005633 dev->rtnl_link_ops->dellink(dev, NULL);
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005634 goto restart;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005635 }
5636
Eric W. Biedermance286d32007-09-12 13:53:49 +02005637		/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005638 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5639 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005640 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005641 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005642 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005643 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005644 }
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005645 goto restart;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005646 }
5647 rtnl_unlock();
5648}
5649
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005650static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005651 .exit = default_device_exit,
5652};
5653
Linus Torvalds1da177e2005-04-16 15:20:36 -07005654/*
5655 * Initialize the DEV module. At boot time this walks the device list and
5656 * unhooks any devices that fail to initialise (normally hardware not
5657 * present) and leaves us with a valid list of present and active devices.
5658 *
5659 */
5660
5661/*
5662 * This is called single threaded during boot, so no need
5663 * to take the rtnl semaphore.
5664 */
5665static int __init net_dev_init(void)
5666{
5667 int i, rc = -ENOMEM;
5668
5669 BUG_ON(!dev_boot_phase);
5670
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671 if (dev_proc_init())
5672 goto out;
5673
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005674 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005675 goto out;
5676
5677 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08005678 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005679 INIT_LIST_HEAD(&ptype_base[i]);
5680
Eric W. Biederman881d9662007-09-17 11:56:21 -07005681 if (register_pernet_subsys(&netdev_net_ops))
5682 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005683
5684 /*
5685 * Initialise the packet receive queues.
5686 */
5687
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005688 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005689 struct softnet_data *queue;
5690
5691 queue = &per_cpu(softnet_data, i);
5692 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005693 queue->completion_queue = NULL;
5694 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005695
5696 queue->backlog.poll = process_backlog;
5697 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005698 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005699 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005700 }
5701
Linus Torvalds1da177e2005-04-16 15:20:36 -07005702 dev_boot_phase = 0;
5703
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005704	/* The loopback device is special: if any other network device
5705	 * is present in a network namespace, the loopback device must
5706	 * be present too. Since we now dynamically allocate and free the
5707	 * loopback device, ensure this invariant is maintained by
5708	 * keeping the loopback device as the first device on the
5709	 * list of network devices, ensuring the loopback device
5710	 * is the first device that appears and the last network device
5711	 * that disappears.
5712	 */
5713 if (register_pernet_device(&loopback_net_ops))
5714 goto out;
5715
5716 if (register_pernet_device(&default_device_ops))
5717 goto out;
5718
Carlos R. Mafra962cf362008-05-15 11:15:37 -03005719 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5720 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005721
5722 hotcpu_notifier(dev_cpu_callback, 0);
5723 dst_init();
5724 dev_mcast_init();
5725 rc = 0;
5726out:
5727 return rc;
5728}
5729
5730subsys_initcall(net_dev_init);
5731
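/* Seed skb_tx_hashrnd once from the random pool, late in boot (late_initcall_sync). */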
Krishna Kumare88721f2009-02-18 17:55:02 -08005732static int __init initialize_hashrnd(void)
5733{
5734 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5735 return 0;
5736}
5737
5738late_initcall_sync(initialize_hashrnd);
5739