/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

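/*
 * Illustrative sketch (hypothetical, kept out of the build with #if 0):
 * how a protocol module might register a handler with dev_add_pack() and
 * tear it down with dev_remove_pack().  ETH_P_EXAMPLE, example_rcv() and
 * example_ptype are made-up names; only dev_add_pack()/dev_remove_pack()
 * and struct packet_type come from the code above and <linux/netdevice.h>.
 */
#if 0
#define ETH_P_EXAMPLE	0x88B5	/* assumed free ethertype, for illustration */

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* a real handler would parse skb here; just consume the frame */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_ptype = {
	.type	= __constant_htons(ETH_P_EXAMPLE),
	.dev	= NULL,		/* NULL means: frames from all devices */
	.func	= example_rcv,
};

static int __init example_init(void)
{
	dev_add_pack(&example_ptype);	/* hashed by ntohs(type) & PTYPE_HASH_MASK */
	return 0;
}

static void __exit example_exit(void)
{
	dev_remove_pack(&example_ptype);	/* sleeps in synchronize_net() */
}
#endif
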
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

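/*
 * Illustrative note on the "netdev=" parameter parsed above: get_options()
 * reads up to four integers -- irq, base_addr, mem_start, mem_end -- and
 * the remainder of the string is taken as the interface name.  A
 * hypothetical command line entry (values made up) would be:
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * which stores irq 9, I/O base 0x300 and the memory window for "eth0" in
 * dev_boot_setup, to be picked up later by netdev_boot_setup_check().
 */
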
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

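/*
 * Illustrative sketch (hypothetical, kept out of the build with #if 0):
 * dev_get_by_name() takes its own reference, so callers pair it with
 * dev_put(); __dev_get_by_name() does not, and therefore must run under
 * RTNL or dev_base_lock.  The "eth0" name and the init_net namespace are
 * assumptions for the example.
 */
#if 0
static void example_lookup(void)
{
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");

	if (dev) {
		printk(KERN_DEBUG "found %s, ifindex %d\n",
		       dev->name, dev->ifindex);
		dev_put(dev);	/* drop the reference taken by dev_get_by_name() */
	}
}
#endif
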
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

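/*
 * Illustrative sketch (hypothetical, kept out of the build with #if 0):
 * a driver would typically call dev_alloc_name() with a "%d" format
 * before register_netdevice(); the first free unit number is filled in
 * ("eth0", "eth1", ...).  example_name_device() is a made-up caller.
 */
#if 0
static int example_name_device(struct net_device *dev)
{
	int unit;

	ASSERT_RTNL();
	unit = dev_alloc_name(dev, "eth%d");	/* e.g. returns 2 for "eth2" */
	if (unit < 0)
		return unit;			/* -EINVAL or -ENFILE */
	return 0;
}
#endif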

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

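/*
 * Illustrative sketch (hypothetical, kept out of the build with #if 0):
 * dev_open()/dev_close() must run under RTNL.  Most in-kernel callers go
 * through dev_change_flags(dev, dev->flags | IFF_UP) instead; this
 * example_bring_up() helper is made up for illustration.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);	/* nop if the device is already IFF_UP */
	rtnl_unlock();
	return err;
}
#endif
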
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

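/*
 * Illustrative sketch (hypothetical, kept out of the build with #if 0):
 * a netdev notifier as registered above.  The callback runs under RTNL,
 * and NETDEV_REGISTER/NETDEV_UP are replayed for devices that already
 * exist at registration time.  example_netdev_event() and the notifier
 * block name are made up.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* netdev notifiers pass the device */

	switch (event) {
	case NETDEV_UP:
		printk(KERN_DEBUG "%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		printk(KERN_DEBUG "%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};
/* registered with register_netdevice_notifier(&example_netdev_notifier) */
#endif
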
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
1330
1331/*
1332 * Support routine. Sends outgoing frames to any network
1333 * taps currently in use.
1334 */
1335
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001336static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337{
1338 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001339
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001340#ifdef CONFIG_NET_CLS_ACT
1341 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1342 net_timestamp(skb);
1343#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001344 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001345#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
1347 rcu_read_lock();
1348 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1349 /* Never send packets back to the socket
1350 * they originated from - MvS (miquels@drinkel.ow.org)
1351 */
1352 if ((ptype->dev == dev || !ptype->dev) &&
1353 (ptype->af_packet_priv == NULL ||
1354 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1355 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
1356 if (!skb2)
1357 break;
1358
1359 /* skb->nh should be correctly
1360 set by sender, so that the second statement is
1361 just protection against buggy protocols.
1362 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001363 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001365 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001366 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 if (net_ratelimit())
1368 printk(KERN_CRIT "protocol %04x is "
1369 "buggy, dev %s\n",
1370 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001371 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 }
1373
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001374 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001376 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 }
1378 }
1379 rcu_read_unlock();
1380}
1381
Denis Vlasenko56079432006-03-29 15:57:29 -08001382
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001383static inline void __netif_reschedule(struct Qdisc *q)
1384{
1385 struct softnet_data *sd;
1386 unsigned long flags;
1387
1388 local_irq_save(flags);
1389 sd = &__get_cpu_var(softnet_data);
1390 q->next_sched = sd->output_queue;
1391 sd->output_queue = q;
1392 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1393 local_irq_restore(flags);
1394}
1395
David S. Miller37437bb2008-07-16 02:15:04 -07001396void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001397{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001398 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1399 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001400}
1401EXPORT_SYMBOL(__netif_schedule);
1402
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001403void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001404{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001405 if (atomic_dec_and_test(&skb->users)) {
1406 struct softnet_data *sd;
1407 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001408
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001409 local_irq_save(flags);
1410 sd = &__get_cpu_var(softnet_data);
1411 skb->next = sd->completion_queue;
1412 sd->completion_queue = skb;
1413 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1414 local_irq_restore(flags);
1415 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001416}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001417EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001418
1419void dev_kfree_skb_any(struct sk_buff *skb)
1420{
1421 if (in_irq() || irqs_disabled())
1422 dev_kfree_skb_irq(skb);
1423 else
1424 dev_kfree_skb(skb);
1425}
1426EXPORT_SYMBOL(dev_kfree_skb_any);
1427
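/*
 * Usage sketch: a TX-completion handler that may run either in hard
 * interrupt context or with IRQs disabled, and therefore must not call
 * dev_kfree_skb() directly.  "my_priv" and "my_next_completed_skb" are
 * hypothetical driver-side names used only for illustration.
 *
 *	static void my_tx_clean(struct my_priv *priv)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = my_next_completed_skb(priv)) != NULL)
 *			dev_kfree_skb_any(skb);
 *	}
 */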
1428
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001429/**
1430 * netif_device_detach - mark device as removed
1431 * @dev: network device
1432 *
 1433 * Mark the device as removed from the system and therefore no longer available.
1434 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001435void netif_device_detach(struct net_device *dev)
1436{
1437 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1438 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001439 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001440 }
1441}
1442EXPORT_SYMBOL(netif_device_detach);
1443
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001444/**
1445 * netif_device_attach - mark device as attached
1446 * @dev: network device
1447 *
 1448 * Mark the device as attached to the system and restart it if needed.
1449 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001450void netif_device_attach(struct net_device *dev)
1451{
1452 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1453 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001454 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001455 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001456 }
1457}
1458EXPORT_SYMBOL(netif_device_attach);
1459
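/*
 * Usage sketch: the common suspend/resume pairing for these two helpers.
 * "my_hw_suspend" and "my_hw_resume" stand in for hypothetical hardware
 * routines; only the netif_device_* calls come from this file.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		my_hw_suspend(dev);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		my_hw_resume(dev);
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */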
Ben Hutchings6de329e2008-06-16 17:02:28 -07001460static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1461{
1462 return ((features & NETIF_F_GEN_CSUM) ||
1463 ((features & NETIF_F_IP_CSUM) &&
1464 protocol == htons(ETH_P_IP)) ||
1465 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001466 protocol == htons(ETH_P_IPV6)) ||
1467 ((features & NETIF_F_FCOE_CRC) &&
1468 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001469}
1470
1471static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1472{
1473 if (can_checksum_protocol(dev->features, skb->protocol))
1474 return true;
1475
1476 if (skb->protocol == htons(ETH_P_8021Q)) {
1477 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1478 if (can_checksum_protocol(dev->features & dev->vlan_features,
1479 veh->h_vlan_encapsulated_proto))
1480 return true;
1481 }
1482
1483 return false;
1484}
Denis Vlasenko56079432006-03-29 15:57:29 -08001485
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486/*
1487 * Invalidate hardware checksum when packet is to be mangled, and
1488 * complete checksum manually on outgoing path.
1489 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001490int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491{
Al Virod3bc23e2006-11-14 21:24:49 -08001492 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001493 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494
Patrick McHardy84fa7932006-08-29 16:44:56 -07001495 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001496 goto out_set_summed;
1497
1498 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001499 /* Let GSO fix up the checksum. */
1500 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 }
1502
Herbert Xua0308472007-10-15 01:47:15 -07001503 offset = skb->csum_start - skb_headroom(skb);
1504 BUG_ON(offset >= skb_headlen(skb));
1505 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1506
1507 offset += skb->csum_offset;
1508 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1509
1510 if (skb_cloned(skb) &&
1511 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1513 if (ret)
1514 goto out;
1515 }
1516
Herbert Xua0308472007-10-15 01:47:15 -07001517 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001518out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001520out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 return ret;
1522}
1523
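/*
 * Usage sketch: completing the checksum in software on the transmit path
 * when the hardware cannot offload it for this particular packet.  The
 * "my_hw_can_csum" capability test is hypothetical.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !my_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */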
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001524/**
1525 * skb_gso_segment - Perform segmentation on skb.
1526 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001527 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001528 *
1529 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001530 *
1531 * It may return NULL if the skb requires no segmentation. This is
1532 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001533 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001534struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001535{
1536 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1537 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001538 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001539 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001540
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001541 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001542 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001543 __skb_pull(skb, skb->mac_len);
1544
Herbert Xu67fd1a72009-01-19 16:26:44 -08001545 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1546 struct net_device *dev = skb->dev;
1547 struct ethtool_drvinfo info = {};
1548
1549 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1550 dev->ethtool_ops->get_drvinfo(dev, &info);
1551
1552 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1553 "ip_summed=%d",
1554 info.driver, dev ? dev->features : 0L,
1555 skb->sk ? skb->sk->sk_route_caps : 0L,
1556 skb->len, skb->data_len, skb->ip_summed);
1557
Herbert Xua430a432006-07-08 13:34:56 -07001558 if (skb_header_cloned(skb) &&
1559 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1560 return ERR_PTR(err);
1561 }
1562
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001563 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001564 list_for_each_entry_rcu(ptype,
1565 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001566 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001567 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001568 err = ptype->gso_send_check(skb);
1569 segs = ERR_PTR(err);
1570 if (err || skb_gso_ok(skb, features))
1571 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001572 __skb_push(skb, (skb->data -
1573 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001574 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001575 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001576 break;
1577 }
1578 }
1579 rcu_read_unlock();
1580
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001581 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001582
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001583 return segs;
1584}
1585
1586EXPORT_SYMBOL(skb_gso_segment);
1587
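/*
 * Usage sketch: software segmentation followed by per-segment transmit,
 * essentially what dev_gso_segment()/dev_hard_start_xmit() below do.  The
 * NULL return (header verification only) is not handled here, and
 * "my_xmit_one" is a hypothetical per-segment transmit helper.
 *
 *	segs = skb_gso_segment(skb, dev->features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	while (segs) {
 *		struct sk_buff *nskb = segs;
 *
 *		segs = nskb->next;
 *		nskb->next = NULL;
 *		my_xmit_one(dev, nskb);
 *	}
 */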
Herbert Xufb286bb2005-11-10 13:01:24 -08001588/* Take action when hardware reception checksum errors are detected. */
1589#ifdef CONFIG_BUG
1590void netdev_rx_csum_fault(struct net_device *dev)
1591{
1592 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001593 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001594 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001595 dump_stack();
1596 }
1597}
1598EXPORT_SYMBOL(netdev_rx_csum_fault);
1599#endif
1600
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601/* Actually, we should eliminate this check as soon as we know that:
 1602 * 1. An IOMMU is present and allows mapping all of the memory.
1603 * 2. No high memory really exists on this machine.
1604 */
1605
1606static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1607{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001608#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609 int i;
1610
1611 if (dev->features & NETIF_F_HIGHDMA)
1612 return 0;
1613
1614 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1615 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1616 return 1;
1617
Herbert Xu3d3a8532006-06-27 13:33:10 -07001618#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 return 0;
1620}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001622struct dev_gso_cb {
1623 void (*destructor)(struct sk_buff *skb);
1624};
1625
1626#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1627
1628static void dev_gso_skb_destructor(struct sk_buff *skb)
1629{
1630 struct dev_gso_cb *cb;
1631
1632 do {
1633 struct sk_buff *nskb = skb->next;
1634
1635 skb->next = nskb->next;
1636 nskb->next = NULL;
1637 kfree_skb(nskb);
1638 } while (skb->next);
1639
1640 cb = DEV_GSO_CB(skb);
1641 if (cb->destructor)
1642 cb->destructor(skb);
1643}
1644
1645/**
1646 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1647 * @skb: buffer to segment
1648 *
1649 * This function segments the given skb and stores the list of segments
1650 * in skb->next.
1651 */
1652static int dev_gso_segment(struct sk_buff *skb)
1653{
1654 struct net_device *dev = skb->dev;
1655 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001656 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1657 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001658
Herbert Xu576a30e2006-06-27 13:22:38 -07001659 segs = skb_gso_segment(skb, features);
1660
1661 /* Verifying header integrity only. */
1662 if (!segs)
1663 return 0;
1664
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001665 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001666 return PTR_ERR(segs);
1667
1668 skb->next = segs;
1669 DEV_GSO_CB(skb)->destructor = skb->destructor;
1670 skb->destructor = dev_gso_skb_destructor;
1671
1672 return 0;
1673}
1674
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001675int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1676 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001677{
Stephen Hemminger00829822008-11-20 20:14:53 -08001678 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001679 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001680
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001681 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001682 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001683 dev_queue_xmit_nit(skb, dev);
1684
Herbert Xu576a30e2006-06-27 13:22:38 -07001685 if (netif_needs_gso(dev, skb)) {
1686 if (unlikely(dev_gso_segment(skb)))
1687 goto out_kfree_skb;
1688 if (skb->next)
1689 goto gso;
1690 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001691
Eric Dumazet93f154b2009-05-18 22:19:19 -07001692 /*
 1693 * If the device doesn't need skb->dst, release it right now while
 1694 * it's still hot in this CPU's cache.
1695 */
1696 if ((dev->priv_flags & IFF_XMIT_DST_RELEASE) && skb->dst) {
1697 dst_release(skb->dst);
1698 skb->dst = NULL;
1699 }
Patrick Ohlyac45f602009-02-12 05:03:37 +00001700 rc = ops->ndo_start_xmit(skb, dev);
Eric Dumazet08baf562009-05-25 22:58:01 -07001701 if (rc == 0)
1702 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001703 /*
1704 * TODO: if skb_orphan() was called by
1705 * dev->hard_start_xmit() (for example, the unmodified
1706 * igb driver does that; bnx2 doesn't), then
1707 * skb_tx_software_timestamp() will be unable to send
1708 * back the time stamp.
1709 *
1710 * How can this be prevented? Always create another
1711 * reference to the socket before calling
 1712 * dev->hard_start_xmit()? Prevent skb_orphan() from
 1713 * doing anything in dev->hard_start_xmit() by clearing
1714 * the skb destructor before the call and restoring it
1715 * afterwards, then doing the skb_orphan() ourselves?
1716 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001717 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001718 }
1719
Herbert Xu576a30e2006-06-27 13:22:38 -07001720gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001721 do {
1722 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001723
1724 skb->next = nskb->next;
1725 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001726 rc = ops->ndo_start_xmit(nskb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001727 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001728 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001729 skb->next = nskb;
1730 return rc;
1731 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001732 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001733 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001734 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001735 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001736
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001737 skb->destructor = DEV_GSO_CB(skb)->destructor;
1738
1739out_kfree_skb:
1740 kfree_skb(skb);
1741 return 0;
1742}
1743
David S. Miller70192982009-01-27 16:34:47 -08001744static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001745
Stephen Hemminger92477442009-03-21 13:39:26 -07001746u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001747{
David S. Miller70192982009-01-27 16:34:47 -08001748 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001749
David S. Miller513de112009-05-03 14:43:10 -07001750 if (skb_rx_queue_recorded(skb)) {
1751 hash = skb_get_rx_queue(skb);
1752 while (unlikely (hash >= dev->real_num_tx_queues))
1753 hash -= dev->real_num_tx_queues;
1754 return hash;
1755 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001756
1757 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001758 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001759 else
David S. Miller70192982009-01-27 16:34:47 -08001760 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001761
David S. Miller70192982009-01-27 16:34:47 -08001762 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001763
David S. Millerb6b2fed2008-07-21 09:48:06 -07001764 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001765}
Stephen Hemminger92477442009-03-21 13:39:26 -07001766EXPORT_SYMBOL(skb_tx_hash);
David S. Miller8f0f2222008-07-15 03:47:03 -07001767
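/*
 * Usage sketch: a multiqueue driver that is happy with the default flow
 * spreading can simply forward to skb_tx_hash() from its own (here
 * hypothetical) queue selection callback.
 *
 *	static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb)
 *	{
 *		return skb_tx_hash(dev, skb);
 *	}
 */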
David S. Millere8a04642008-07-17 00:34:19 -07001768static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1769 struct sk_buff *skb)
1770{
Stephen Hemminger00829822008-11-20 20:14:53 -08001771 const struct net_device_ops *ops = dev->netdev_ops;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001772 u16 queue_index = 0;
1773
Stephen Hemminger00829822008-11-20 20:14:53 -08001774 if (ops->ndo_select_queue)
1775 queue_index = ops->ndo_select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001776 else if (dev->real_num_tx_queues > 1)
David S. Miller70192982009-01-27 16:34:47 -08001777 queue_index = skb_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001778
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001779 skb_set_queue_mapping(skb, queue_index);
1780 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001781}
1782
Dave Jonesd29f7492008-07-22 14:09:06 -07001783/**
1784 * dev_queue_xmit - transmit a buffer
1785 * @skb: buffer to transmit
1786 *
1787 * Queue a buffer for transmission to a network device. The caller must
1788 * have set the device and priority and built the buffer before calling
1789 * this function. The function can be called from an interrupt.
1790 *
1791 * A negative errno code is returned on a failure. A success does not
1792 * guarantee the frame will be transmitted as it may be dropped due
1793 * to congestion or traffic shaping.
1794 *
1795 * -----------------------------------------------------------------------------------
1796 * I notice this method can also return errors from the queue disciplines,
1797 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1798 * be positive.
1799 *
1800 * Regardless of the return value, the skb is consumed, so it is currently
1801 * difficult to retry a send to this method. (You can bump the ref count
1802 * before sending to hold a reference for retry if you are careful.)
1803 *
1804 * When calling this method, interrupts MUST be enabled. This is because
1805 * the BH enable code must have IRQs enabled so that it will not deadlock.
1806 * --BLG
1807 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808int dev_queue_xmit(struct sk_buff *skb)
1809{
1810 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001811 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 struct Qdisc *q;
1813 int rc = -ENOMEM;
1814
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001815 /* GSO will handle the following emulations directly. */
1816 if (netif_needs_gso(dev, skb))
1817 goto gso;
1818
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 if (skb_shinfo(skb)->frag_list &&
1820 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001821 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 goto out_kfree_skb;
1823
 1824 /* A fragmented skb is linearized if the device does not support SG,
 1825 * or if at least one of the fragments is in highmem and the device
 1826 * does not support DMA from it.
1827 */
1828 if (skb_shinfo(skb)->nr_frags &&
1829 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001830 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 goto out_kfree_skb;
1832
1833 /* If packet is not checksummed and device does not support
1834 * checksumming for this protocol, complete checksumming here.
1835 */
Herbert Xu663ead32007-04-09 11:59:07 -07001836 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1837 skb_set_transport_header(skb, skb->csum_start -
1838 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001839 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1840 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001841 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001843gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001844 /* Disable soft irqs for various locks below. Also
1845 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001847 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
David S. Millereae792b2008-07-15 03:03:33 -07001849 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001850 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001851
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852#ifdef CONFIG_NET_CLS_ACT
1853 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1854#endif
1855 if (q->enqueue) {
David S. Miller5fb66222008-08-02 20:02:43 -07001856 spinlock_t *root_lock = qdisc_lock(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857
David S. Miller37437bb2008-07-16 02:15:04 -07001858 spin_lock(root_lock);
1859
David S. Millera9312ae2008-08-17 21:51:03 -07001860 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
David S. Miller96d20312008-08-17 23:37:16 -07001861 kfree_skb(skb);
David S. Millera9312ae2008-08-17 21:51:03 -07001862 rc = NET_XMIT_DROP;
David S. Miller96d20312008-08-17 23:37:16 -07001863 } else {
1864 rc = qdisc_enqueue_root(skb, q);
1865 qdisc_run(q);
David S. Millera9312ae2008-08-17 21:51:03 -07001866 }
David S. Miller37437bb2008-07-16 02:15:04 -07001867 spin_unlock(root_lock);
1868
David S. Miller37437bb2008-07-16 02:15:04 -07001869 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 }
1871
 1872 /* The device has no queue. Common case for software devices:
 1873 loopback, all sorts of tunnels...
 1874
Herbert Xu932ff272006-06-09 12:20:56 -07001875 Really, it is unlikely that netif_tx_lock protection is necessary
 1876 here. (f.e. loopback and IP tunnels are clean, statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 counters aside.)
 1878 However, it is possible that they rely on the protection
 1879 we provide here.
 1880
 1881 Check this and take the lock. It is not prone to deadlocks.
 1882 Alternatively, treat it like the noqueue qdisc; that is even simpler 8)
1883 */
1884 if (dev->flags & IFF_UP) {
1885 int cpu = smp_processor_id(); /* ok because BHs are off */
1886
David S. Millerc773e842008-07-08 23:13:53 -07001887 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888
David S. Millerc773e842008-07-08 23:13:53 -07001889 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001891 if (!netif_tx_queue_stopped(txq)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 rc = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001893 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001894 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 goto out;
1896 }
1897 }
David S. Millerc773e842008-07-08 23:13:53 -07001898 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 if (net_ratelimit())
1900 printk(KERN_CRIT "Virtual device %s asks to "
1901 "queue packet!\n", dev->name);
1902 } else {
 1903 /* Recursion detected! It is possible,
 1904 * unfortunately. */
1905 if (net_ratelimit())
1906 printk(KERN_CRIT "Dead loop on virtual device "
1907 "%s, fix it urgently!\n", dev->name);
1908 }
1909 }
1910
1911 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001912 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
1914out_kfree_skb:
1915 kfree_skb(skb);
1916 return rc;
1917out:
Herbert Xud4828d82006-06-22 02:28:18 -07001918 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 return rc;
1920}
1921
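/*
 * Usage sketch: the minimal "build and send" sequence a protocol layer
 * would use.  "MY_PROTO", "payload" and "payload_len" are illustrative,
 * and error handling is abbreviated; note that positive NET_XMIT_* codes
 * may come back from the queueing discipline, as described above.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	skb->dev = dev;
 *	skb->protocol = htons(MY_PROTO);
 *	return dev_queue_xmit(skb);
 */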
1922
1923/*=======================================================================
1924 Receiver routines
1925 =======================================================================*/
1926
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001927int netdev_max_backlog __read_mostly = 1000;
1928int netdev_budget __read_mostly = 300;
1929int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
1931DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1932
1933
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934/**
1935 * netif_rx - post buffer to the network code
1936 * @skb: buffer to post
1937 *
1938 * This function receives a packet from a device driver and queues it for
1939 * the upper (protocol) levels to process. It always succeeds. The buffer
1940 * may be dropped during processing for congestion control or by the
1941 * protocol layers.
1942 *
1943 * return values:
1944 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 * NET_RX_DROP (packet was dropped)
1946 *
1947 */
1948
1949int netif_rx(struct sk_buff *skb)
1950{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 struct softnet_data *queue;
1952 unsigned long flags;
1953
1954 /* if netpoll wants it, pretend we never saw it */
1955 if (netpoll_rx(skb))
1956 return NET_RX_DROP;
1957
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001958 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001959 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
1961 /*
 1962 * The code is arranged so that the path is shortest
 1963 * when the CPU is congested but still operating.
1964 */
1965 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 queue = &__get_cpu_var(softnet_data);
1967
1968 __get_cpu_var(netdev_rx_stat).total++;
1969 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1970 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001974 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 }
1976
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001977 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 goto enqueue;
1979 }
1980
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 __get_cpu_var(netdev_rx_stat).dropped++;
1982 local_irq_restore(flags);
1983
1984 kfree_skb(skb);
1985 return NET_RX_DROP;
1986}
1987
1988int netif_rx_ni(struct sk_buff *skb)
1989{
1990 int err;
1991
1992 preempt_disable();
1993 err = netif_rx(skb);
1994 if (local_softirq_pending())
1995 do_softirq();
1996 preempt_enable();
1997
1998 return err;
1999}
2000
2001EXPORT_SYMBOL(netif_rx_ni);
2002
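/*
 * Usage sketch: a non-NAPI driver's receive interrupt pushing one frame
 * into the per-CPU backlog.  "len" and "data" would come from the
 * hypothetical hardware descriptor.
 *
 *	skb = dev_alloc_skb(len + NET_IP_ALIGN);
 *	if (!skb) {
 *		dev->stats.rx_dropped++;
 *		return;
 *	}
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, len), data, len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */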
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003static void net_tx_action(struct softirq_action *h)
2004{
2005 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2006
2007 if (sd->completion_queue) {
2008 struct sk_buff *clist;
2009
2010 local_irq_disable();
2011 clist = sd->completion_queue;
2012 sd->completion_queue = NULL;
2013 local_irq_enable();
2014
2015 while (clist) {
2016 struct sk_buff *skb = clist;
2017 clist = clist->next;
2018
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002019 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 __kfree_skb(skb);
2021 }
2022 }
2023
2024 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002025 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
2027 local_irq_disable();
2028 head = sd->output_queue;
2029 sd->output_queue = NULL;
2030 local_irq_enable();
2031
2032 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002033 struct Qdisc *q = head;
2034 spinlock_t *root_lock;
2035
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 head = head->next_sched;
2037
David S. Miller5fb66222008-08-02 20:02:43 -07002038 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002039 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002040 smp_mb__before_clear_bit();
2041 clear_bit(__QDISC_STATE_SCHED,
2042 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002043 qdisc_run(q);
2044 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002046 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002047 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002048 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002049 } else {
2050 smp_mb__before_clear_bit();
2051 clear_bit(__QDISC_STATE_SCHED,
2052 &q->state);
2053 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 }
2055 }
2056 }
2057}
2058
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002059static inline int deliver_skb(struct sk_buff *skb,
2060 struct packet_type *pt_prev,
2061 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062{
2063 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002064 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065}
2066
2067#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Stephen Hemminger6229e362007-03-21 13:38:47 -07002068/* These hooks defined here for ATM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069struct net_bridge;
2070struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
2071 unsigned char *addr);
Stephen Hemminger6229e362007-03-21 13:38:47 -07002072void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
Stephen Hemminger6229e362007-03-21 13:38:47 -07002074/*
2075 * If bridge module is loaded call bridging hook.
2076 * returns NULL if packet was consumed.
2077 */
2078struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2079 struct sk_buff *skb) __read_mostly;
2080static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2081 struct packet_type **pt_prev, int *ret,
2082 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083{
2084 struct net_bridge_port *port;
2085
Stephen Hemminger6229e362007-03-21 13:38:47 -07002086 if (skb->pkt_type == PACKET_LOOPBACK ||
2087 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2088 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089
2090 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002091 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002093 }
2094
Stephen Hemminger6229e362007-03-21 13:38:47 -07002095 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096}
2097#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002098#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099#endif
2100
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002101#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2102struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2103EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2104
2105static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2106 struct packet_type **pt_prev,
2107 int *ret,
2108 struct net_device *orig_dev)
2109{
2110 if (skb->dev->macvlan_port == NULL)
2111 return skb;
2112
2113 if (*pt_prev) {
2114 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2115 *pt_prev = NULL;
2116 }
2117 return macvlan_handle_frame_hook(skb);
2118}
2119#else
2120#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2121#endif
2122
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123#ifdef CONFIG_NET_CLS_ACT
 2124/* TODO: Maybe we should just force sch_ingress to be compiled in
 2125 * whenever CONFIG_NET_CLS_ACT is. Otherwise we pay for some useless
 2126 * instructions (a compare and two extra stores) when we don't have it
 2127 * loaded but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002128 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 * the ingress scheduler, you just can't add policies on ingress.
2130 *
2131 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002132static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002135 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002136 struct netdev_queue *rxq;
2137 int result = TC_ACT_OK;
2138 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002139
Herbert Xuf697c3e2007-10-14 00:38:47 -07002140 if (MAX_RED_LOOP < ttl++) {
2141 printk(KERN_WARNING
2142 "Redir loop detected Dropping packet (%d->%d)\n",
2143 skb->iif, dev->ifindex);
2144 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 }
2146
Herbert Xuf697c3e2007-10-14 00:38:47 -07002147 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2148 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2149
David S. Miller555353c2008-07-08 17:33:13 -07002150 rxq = &dev->rx_queue;
2151
David S. Miller83874002008-07-17 00:53:03 -07002152 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002153 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002154 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002155 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2156 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002157 spin_unlock(qdisc_lock(q));
2158 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002159
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 return result;
2161}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002162
2163static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2164 struct packet_type **pt_prev,
2165 int *ret, struct net_device *orig_dev)
2166{
David S. Miller8d50b532008-07-30 02:37:46 -07002167 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002168 goto out;
2169
2170 if (*pt_prev) {
2171 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2172 *pt_prev = NULL;
2173 } else {
2174 /* Huh? Why does turning on AF_PACKET affect this? */
2175 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2176 }
2177
2178 switch (ing_filter(skb)) {
2179 case TC_ACT_SHOT:
2180 case TC_ACT_STOLEN:
2181 kfree_skb(skb);
2182 return NULL;
2183 }
2184
2185out:
2186 skb->tc_verd = 0;
2187 return skb;
2188}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189#endif
2190
Patrick McHardybc1d0412008-07-14 22:49:30 -07002191/*
2192 * netif_nit_deliver - deliver received packets to network taps
2193 * @skb: buffer
2194 *
2195 * This function is used to deliver incoming packets to network
2196 * taps. It should be used when the normal netif_receive_skb path
2197 * is bypassed, for example because of VLAN acceleration.
2198 */
2199void netif_nit_deliver(struct sk_buff *skb)
2200{
2201 struct packet_type *ptype;
2202
2203 if (list_empty(&ptype_all))
2204 return;
2205
2206 skb_reset_network_header(skb);
2207 skb_reset_transport_header(skb);
2208 skb->mac_len = skb->network_header - skb->mac_header;
2209
2210 rcu_read_lock();
2211 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2212 if (!ptype->dev || ptype->dev == skb->dev)
2213 deliver_skb(skb, ptype, skb->dev);
2214 }
2215 rcu_read_unlock();
2216}
2217
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002218/**
2219 * netif_receive_skb - process receive buffer from network
2220 * @skb: buffer to process
2221 *
2222 * netif_receive_skb() is the main receive data processing function.
2223 * It always succeeds. The buffer may be dropped during processing
2224 * for congestion control or by the protocol layers.
2225 *
2226 * This function may only be called from softirq context and interrupts
2227 * should be enabled.
2228 *
2229 * Return values (usually ignored):
2230 * NET_RX_SUCCESS: no congestion
2231 * NET_RX_DROP: packet was dropped
2232 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233int netif_receive_skb(struct sk_buff *skb)
2234{
2235 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002236 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002237 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002239 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002241 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2242 return NET_RX_SUCCESS;
2243
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002245 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 return NET_RX_DROP;
2247
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002248 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002249 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250
Patrick McHardyc01003c2007-03-29 11:46:52 -07002251 if (!skb->iif)
2252 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002253
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002254 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002255 orig_dev = skb->dev;
2256 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002257 if (skb_bond_should_drop(skb))
2258 null_or_orig = orig_dev; /* deliver only exact match */
2259 else
2260 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002261 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002262
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 __get_cpu_var(netdev_rx_stat).total++;
2264
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002265 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002266 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002267 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268
2269 pt_prev = NULL;
2270
2271 rcu_read_lock();
2272
2273#ifdef CONFIG_NET_CLS_ACT
2274 if (skb->tc_verd & TC_NCLS) {
2275 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2276 goto ncls;
2277 }
2278#endif
2279
2280 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002281 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2282 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002283 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002284 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 pt_prev = ptype;
2286 }
2287 }
2288
2289#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002290 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2291 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293ncls:
2294#endif
2295
Stephen Hemminger6229e362007-03-21 13:38:47 -07002296 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2297 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002299 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2300 if (!skb)
2301 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302
Herbert Xu9a279bc2009-02-04 16:55:27 -08002303 skb_orphan(skb);
2304
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002306 list_for_each_entry_rcu(ptype,
2307 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002309 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2310 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002311 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002312 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 pt_prev = ptype;
2314 }
2315 }
2316
2317 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002318 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 } else {
2320 kfree_skb(skb);
 2321 /* Jamal, now you will not be able to escape explaining
 2322 * to me how you were going to use this. :-)
2323 */
2324 ret = NET_RX_DROP;
2325 }
2326
2327out:
2328 rcu_read_unlock();
2329 return ret;
2330}
2331
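/*
 * Usage sketch: the core of a NAPI driver's poll routine, feeding frames
 * to netif_receive_skb() and completing when the budget is not used up.
 * "my_rx_one" is a hypothetical per-descriptor receive helper.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = my_rx_one(napi)) != NULL) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);
 *		return work;
 *	}
 */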
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002332/* Network device is going away, flush any packets still pending */
2333static void flush_backlog(void *arg)
2334{
2335 struct net_device *dev = arg;
2336 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2337 struct sk_buff *skb, *tmp;
2338
2339 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2340 if (skb->dev == dev) {
2341 __skb_unlink(skb, &queue->input_pkt_queue);
2342 kfree_skb(skb);
2343 }
2344}
2345
Herbert Xud565b0a2008-12-15 23:38:52 -08002346static int napi_gro_complete(struct sk_buff *skb)
2347{
2348 struct packet_type *ptype;
2349 __be16 type = skb->protocol;
2350 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2351 int err = -ENOENT;
2352
Herbert Xufc59f9a2009-04-14 15:11:06 -07002353 if (NAPI_GRO_CB(skb)->count == 1) {
2354 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002355 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002356 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002357
2358 rcu_read_lock();
2359 list_for_each_entry_rcu(ptype, head, list) {
2360 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2361 continue;
2362
2363 err = ptype->gro_complete(skb);
2364 break;
2365 }
2366 rcu_read_unlock();
2367
2368 if (err) {
2369 WARN_ON(&ptype->list == head);
2370 kfree_skb(skb);
2371 return NET_RX_SUCCESS;
2372 }
2373
2374out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002375 return netif_receive_skb(skb);
2376}
2377
2378void napi_gro_flush(struct napi_struct *napi)
2379{
2380 struct sk_buff *skb, *next;
2381
2382 for (skb = napi->gro_list; skb; skb = next) {
2383 next = skb->next;
2384 skb->next = NULL;
2385 napi_gro_complete(skb);
2386 }
2387
Herbert Xu4ae55442009-02-08 18:00:36 +00002388 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002389 napi->gro_list = NULL;
2390}
2391EXPORT_SYMBOL(napi_gro_flush);
2392
Herbert Xu96e93ea2009-01-06 10:49:34 -08002393int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002394{
2395 struct sk_buff **pp = NULL;
2396 struct packet_type *ptype;
2397 __be16 type = skb->protocol;
2398 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002399 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002400 int mac_len;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002401 int ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002402
2403 if (!(skb->dev->features & NETIF_F_GRO))
2404 goto normal;
2405
Herbert Xuf17f5c92009-01-14 14:36:12 -08002406 if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
2407 goto normal;
2408
Herbert Xud565b0a2008-12-15 23:38:52 -08002409 rcu_read_lock();
2410 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002411 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2412 continue;
2413
Herbert Xu86911732009-01-29 14:19:50 +00002414 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002415 mac_len = skb->network_header - skb->mac_header;
2416 skb->mac_len = mac_len;
2417 NAPI_GRO_CB(skb)->same_flow = 0;
2418 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002419 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002420
Herbert Xud565b0a2008-12-15 23:38:52 -08002421 pp = ptype->gro_receive(&napi->gro_list, skb);
2422 break;
2423 }
2424 rcu_read_unlock();
2425
2426 if (&ptype->list == head)
2427 goto normal;
2428
Herbert Xu0da2afd52008-12-26 14:57:42 -08002429 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002430 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002431
Herbert Xud565b0a2008-12-15 23:38:52 -08002432 if (pp) {
2433 struct sk_buff *nskb = *pp;
2434
2435 *pp = nskb->next;
2436 nskb->next = NULL;
2437 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002438 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002439 }
2440
Herbert Xu0da2afd52008-12-26 14:57:42 -08002441 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002442 goto ok;
2443
Herbert Xu4ae55442009-02-08 18:00:36 +00002444 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002445 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002446
Herbert Xu4ae55442009-02-08 18:00:36 +00002447 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002448 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002449 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002450 skb->next = napi->gro_list;
2451 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002452 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002453
Herbert Xuad0f9902009-02-01 01:24:55 -08002454pull:
Herbert Xucb189782009-05-26 18:50:31 +00002455 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2456 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2457
2458 BUG_ON(skb->end - skb->tail < grow);
2459
2460 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2461
2462 skb->tail += grow;
2463 skb->data_len -= grow;
2464
2465 skb_shinfo(skb)->frags[0].page_offset += grow;
2466 skb_shinfo(skb)->frags[0].size -= grow;
2467
2468 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2469 put_page(skb_shinfo(skb)->frags[0].page);
2470 memmove(skb_shinfo(skb)->frags,
2471 skb_shinfo(skb)->frags + 1,
2472 --skb_shinfo(skb)->nr_frags);
2473 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002474 }
2475
Herbert Xud565b0a2008-12-15 23:38:52 -08002476ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002477 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002478
2479normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002480 ret = GRO_NORMAL;
2481 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002482}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002483EXPORT_SYMBOL(dev_gro_receive);
2484
2485static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2486{
2487 struct sk_buff *p;
2488
Herbert Xud1c76af2009-03-16 10:50:02 -07002489 if (netpoll_rx_on(skb))
2490 return GRO_NORMAL;
2491
Herbert Xu96e93ea2009-01-06 10:49:34 -08002492 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002493 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2494 && !compare_ether_header(skb_mac_header(p),
2495 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002496 NAPI_GRO_CB(p)->flush = 0;
2497 }
2498
2499 return dev_gro_receive(napi, skb);
2500}
Herbert Xu5d38a072009-01-04 16:13:40 -08002501
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002502int napi_skb_finish(int ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002503{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002504 int err = NET_RX_SUCCESS;
2505
2506 switch (ret) {
2507 case GRO_NORMAL:
Herbert Xu5d38a072009-01-04 16:13:40 -08002508 return netif_receive_skb(skb);
2509
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002510 case GRO_DROP:
2511 err = NET_RX_DROP;
2512 /* fall through */
2513
2514 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002515 kfree_skb(skb);
2516 break;
2517 }
2518
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002519 return err;
2520}
2521EXPORT_SYMBOL(napi_skb_finish);
2522
Herbert Xu78a478d2009-05-26 18:50:21 +00002523void skb_gro_reset_offset(struct sk_buff *skb)
2524{
2525 NAPI_GRO_CB(skb)->data_offset = 0;
2526 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002527 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002528
Herbert Xu78d3fd02009-05-26 18:50:23 +00002529 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002530 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002531 NAPI_GRO_CB(skb)->frag0 =
2532 page_address(skb_shinfo(skb)->frags[0].page) +
2533 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002534 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2535 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002536}
2537EXPORT_SYMBOL(skb_gro_reset_offset);
2538
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002539int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2540{
Herbert Xu86911732009-01-29 14:19:50 +00002541 skb_gro_reset_offset(skb);
2542
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002543 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002544}
2545EXPORT_SYMBOL(napi_gro_receive);
2546
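/*
 * Usage sketch: inside a NAPI poll loop, handing frames to GRO instead of
 * calling netif_receive_skb() directly.  Aggregation only happens when the
 * device advertises NETIF_F_GRO (see dev_gro_receive() above).
 *
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	napi_gro_receive(napi, skb);
 */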
Herbert Xu96e93ea2009-01-06 10:49:34 -08002547void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2548{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002549 __skb_pull(skb, skb_headlen(skb));
2550 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2551
2552 napi->skb = skb;
2553}
2554EXPORT_SYMBOL(napi_reuse_skb);
2555
Herbert Xu76620aa2009-04-16 02:02:07 -07002556struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002557{
2558 struct net_device *dev = napi->dev;
2559 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002560
2561 if (!skb) {
2562 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2563 if (!skb)
2564 goto out;
2565
2566 skb_reserve(skb, NET_IP_ALIGN);
Herbert Xu76620aa2009-04-16 02:02:07 -07002567
2568 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002569 }
2570
Herbert Xu96e93ea2009-01-06 10:49:34 -08002571out:
2572 return skb;
2573}
Herbert Xu76620aa2009-04-16 02:02:07 -07002574EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002575
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002576int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2577{
2578 int err = NET_RX_SUCCESS;
2579
2580 switch (ret) {
2581 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002582 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002583 skb->protocol = eth_type_trans(skb, napi->dev);
2584
2585 if (ret == GRO_NORMAL)
2586 return netif_receive_skb(skb);
2587
2588 skb_gro_pull(skb, -ETH_HLEN);
2589 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002590
2591 case GRO_DROP:
2592 err = NET_RX_DROP;
2593 /* fall through */
2594
2595 case GRO_MERGED_FREE:
2596 napi_reuse_skb(napi, skb);
2597 break;
2598 }
2599
2600 return err;
2601}
2602EXPORT_SYMBOL(napi_frags_finish);
2603
Herbert Xu76620aa2009-04-16 02:02:07 -07002604struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002605{
Herbert Xu76620aa2009-04-16 02:02:07 -07002606 struct sk_buff *skb = napi->skb;
2607 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002608 unsigned int hlen;
2609 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002610
2611 napi->skb = NULL;
2612
2613 skb_reset_mac_header(skb);
2614 skb_gro_reset_offset(skb);
2615
Herbert Xua5b1cf22009-05-26 18:50:28 +00002616 off = skb_gro_offset(skb);
2617 hlen = off + sizeof(*eth);
2618 eth = skb_gro_header_fast(skb, off);
2619 if (skb_gro_header_hard(skb, hlen)) {
2620 eth = skb_gro_header_slow(skb, hlen, off);
2621 if (unlikely(!eth)) {
2622 napi_reuse_skb(napi, skb);
2623 skb = NULL;
2624 goto out;
2625 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002626 }
2627
2628 skb_gro_pull(skb, sizeof(*eth));
2629
2630 /*
2631 * This works because the only protocols we care about don't require
2632 * special handling. We'll fix it up properly at the end.
2633 */
2634 skb->protocol = eth->h_proto;
2635
2636out:
2637 return skb;
2638}
2639EXPORT_SYMBOL(napi_frags_skb);
2640
2641int napi_gro_frags(struct napi_struct *napi)
2642{
2643 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002644
2645 if (!skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002646 return NET_RX_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002647
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002648 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002649}
2650EXPORT_SYMBOL(napi_gro_frags);
2651
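/*
 * Usage sketch: page-based receive without building a linear skb in the
 * driver.  The RX page is attached as a fragment to the skb handed out by
 * napi_get_frags(), and napi_frags_skb()/napi_gro_frags() then pull the
 * Ethernet header.  "page", "offset" and "len" are hypothetical values
 * taken from the hardware descriptor.
 *
 *	skb = napi_get_frags(napi);
 *	if (!skb)
 *		goto drop;
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += len;
 *	napi_gro_frags(napi);
 */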
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002652static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653{
2654 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2656 unsigned long start_time = jiffies;
2657
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002658 napi->weight = weight_p;
2659 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661
2662 local_irq_disable();
2663 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002664 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002665 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002666 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002667 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002668 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 local_irq_enable();
2670
Herbert Xu8f1ead22009-03-26 00:59:10 -07002671 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002672 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002674 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675}
2676
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002677/**
2678 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002679 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002680 *
2681 * The entry's receive function will be scheduled to run
2682 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002683void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002684{
2685 unsigned long flags;
2686
2687 local_irq_save(flags);
2688 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2689 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2690 local_irq_restore(flags);
2691}
2692EXPORT_SYMBOL(__napi_schedule);
2693
Herbert Xud565b0a2008-12-15 23:38:52 -08002694void __napi_complete(struct napi_struct *n)
2695{
2696 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2697 BUG_ON(n->gro_list);
2698
2699 list_del(&n->poll_list);
2700 smp_mb__before_clear_bit();
2701 clear_bit(NAPI_STATE_SCHED, &n->state);
2702}
2703EXPORT_SYMBOL(__napi_complete);
2704
2705void napi_complete(struct napi_struct *n)
2706{
2707 unsigned long flags;
2708
2709 /*
 2710 * Don't let NAPI dequeue from the CPU poll list
 2711 * just in case it's running on a different CPU.
2712 */
2713 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2714 return;
2715
2716 napi_gro_flush(n);
2717 local_irq_save(flags);
2718 __napi_complete(n);
2719 local_irq_restore(flags);
2720}
2721EXPORT_SYMBOL(napi_complete);
2722
2723void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2724 int (*poll)(struct napi_struct *, int), int weight)
2725{
2726 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002727 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002728 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002729 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002730 napi->poll = poll;
2731 napi->weight = weight;
2732 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002733 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002734#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002735 spin_lock_init(&napi->poll_lock);
2736 napi->poll_owner = -1;
2737#endif
2738 set_bit(NAPI_STATE_SCHED, &napi->state);
2739}
2740EXPORT_SYMBOL(netif_napi_add);
2741
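/*
 * Usage sketch: registering a NAPI context at probe time and scheduling it
 * from the interrupt handler.  "my_priv", "my_poll" and "my_disable_rx_irq"
 * are hypothetical; the napi_enable()/napi_disable() calls in the open and
 * close paths are omitted for brevity.
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, 64);
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		my_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */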
2742void netif_napi_del(struct napi_struct *napi)
2743{
2744 struct sk_buff *skb, *next;
2745
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002746 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002747 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002748
2749 for (skb = napi->gro_list; skb; skb = next) {
2750 next = skb->next;
2751 skb->next = NULL;
2752 kfree_skb(skb);
2753 }
2754
2755 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002756 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002757}
2758EXPORT_SYMBOL(netif_napi_del);
2759
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002760
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761static void net_rx_action(struct softirq_action *h)
2762{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002763 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002764 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002765 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002766 void *have;
2767
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 local_irq_disable();
2769
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002770 while (!list_empty(list)) {
2771 struct napi_struct *n;
2772 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002774		/* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002775		 * Allow this to run for 2 jiffies, which allows
2776		 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002777 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002778 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 goto softnet_break;
2780
2781 local_irq_enable();
2782
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002783 /* Even though interrupts have been re-enabled, this
2784 * access is safe because interrupts can only add new
2785 * entries to the tail of this list, and only ->poll()
2786 * calls can remove this head entry from the list.
2787 */
2788 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002790 have = netpoll_poll_lock(n);
2791
2792 weight = n->weight;
2793
David S. Miller0a7606c2007-10-29 21:28:47 -07002794 /* This NAPI_STATE_SCHED test is for avoiding a race
2795 * with netpoll's poll_napi(). Only the entity which
2796 * obtains the lock and sees NAPI_STATE_SCHED set will
2797 * actually make the ->poll() call. Therefore we avoid
2798		 * accidentally calling ->poll() when NAPI is not scheduled.
2799 */
2800 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002801 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002802 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002803 trace_napi_poll(n);
2804 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002805
2806 WARN_ON_ONCE(work > weight);
2807
2808 budget -= work;
2809
2810 local_irq_disable();
2811
2812 /* Drivers must not modify the NAPI state if they
2813 * consume the entire weight. In such cases this code
2814 * still "owns" the NAPI instance and therefore can
2815 * move the instance around on the list at-will.
2816 */
David S. Millerfed17f32008-01-07 21:00:40 -08002817 if (unlikely(work == weight)) {
2818 if (unlikely(napi_disable_pending(n)))
2819 __napi_complete(n);
2820 else
2821 list_move_tail(&n->poll_list, list);
2822 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002823
2824 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 }
2826out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002827 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002828
Chris Leechdb217332006-06-17 21:24:58 -07002829#ifdef CONFIG_NET_DMA
2830 /*
2831 * There may not be any more sk_buffs coming right now, so push
2832 * any pending DMA copies to hardware
2833 */
Dan Williams2ba05622009-01-06 11:38:14 -07002834 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002835#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002836
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 return;
2838
2839softnet_break:
2840 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2841 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2842 goto out;
2843}
2844
2845static gifconf_func_t *gifconf_list[NPROTO];
2846
2847/**
2848 * register_gifconf - register a SIOCGIF handler
2849 * @family: Address family
2850 * @gifconf: Function handler
2851 *
2852 * Register protocol dependent address dumping routines. The handler
2853 * that is passed must not be freed or reused until it has been replaced
2854 * by another handler.
2855 */
2856int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2857{
2858 if (family >= NPROTO)
2859 return -EINVAL;
2860 gifconf_list[family] = gifconf;
2861 return 0;
2862}
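/*
 * Usage sketch: an address family registers its SIOCGIFCONF helper once
 * at init time (IPv4 does this with inet_gifconf() in
 * net/ipv4/devinet.c). The handler below is a hypothetical stand-in
 * that writes one struct ifreq per address and returns the number of
 * bytes consumed.
 *
 *	static int foo_gifconf(struct net_device *dev, char __user *buf,
 *			       int len)
 *	{
 *		... fill struct ifreq entries into buf, return bytes used ...
 *	}
 *
 *	register_gifconf(PF_INET, foo_gifconf);
 */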
2863
2864
2865/*
2866 * Map an interface index to its name (SIOCGIFNAME)
2867 */
2868
2869/*
2870 * We need this ioctl for efficient implementation of the
2871 * if_indextoname() function required by the IPv6 API. Without
2872 * it, we would have to search all the interfaces to find a
2873 * match. --pb
2874 */
2875
Eric W. Biederman881d9662007-09-17 11:56:21 -07002876static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877{
2878 struct net_device *dev;
2879 struct ifreq ifr;
2880
2881 /*
2882 * Fetch the caller's info block.
2883 */
2884
2885 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2886 return -EFAULT;
2887
2888 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002889 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 if (!dev) {
2891 read_unlock(&dev_base_lock);
2892 return -ENODEV;
2893 }
2894
2895 strcpy(ifr.ifr_name, dev->name);
2896 read_unlock(&dev_base_lock);
2897
2898 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2899 return -EFAULT;
2900 return 0;
2901}
2902
2903/*
2904 * Perform a SIOCGIFCONF call. This structure will change
2905 * size eventually, and there is nothing I can do about it.
2906 * Thus we will need a 'compatibility mode'.
2907 */
2908
Eric W. Biederman881d9662007-09-17 11:56:21 -07002909static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910{
2911 struct ifconf ifc;
2912 struct net_device *dev;
2913 char __user *pos;
2914 int len;
2915 int total;
2916 int i;
2917
2918 /*
2919 * Fetch the caller's info block.
2920 */
2921
2922 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2923 return -EFAULT;
2924
2925 pos = ifc.ifc_buf;
2926 len = ifc.ifc_len;
2927
2928 /*
2929 * Loop over the interfaces, and write an info block for each.
2930 */
2931
2932 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002933 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934 for (i = 0; i < NPROTO; i++) {
2935 if (gifconf_list[i]) {
2936 int done;
2937 if (!pos)
2938 done = gifconf_list[i](dev, NULL, 0);
2939 else
2940 done = gifconf_list[i](dev, pos + total,
2941 len - total);
2942 if (done < 0)
2943 return -EFAULT;
2944 total += done;
2945 }
2946 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002947 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948
2949 /*
2950 * All done. Write the updated control block back to the caller.
2951 */
2952 ifc.ifc_len = total;
2953
2954 /*
2955 * Both BSD and Solaris return 0 here, so we do too.
2956 */
2957 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2958}
2959
2960#ifdef CONFIG_PROC_FS
2961/*
2962 * This is invoked by the /proc filesystem handler to display a device
2963 * in detail.
2964 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002966 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967{
Denis V. Luneve372c412007-11-19 22:31:54 -08002968 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002969 loff_t off;
2970 struct net_device *dev;
2971
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002973 if (!*pos)
2974 return SEQ_START_TOKEN;
2975
2976 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002977 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002978 if (off++ == *pos)
2979 return dev;
2980
2981 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982}
2983
2984void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2985{
Denis V. Luneve372c412007-11-19 22:31:54 -08002986 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002988 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002989 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990}
2991
2992void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002993 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994{
2995 read_unlock(&dev_base_lock);
2996}
2997
2998static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2999{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003000 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001
Rusty Russell5a1b5892007-04-28 21:04:03 -07003002 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3003 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3004 dev->name, stats->rx_bytes, stats->rx_packets,
3005 stats->rx_errors,
3006 stats->rx_dropped + stats->rx_missed_errors,
3007 stats->rx_fifo_errors,
3008 stats->rx_length_errors + stats->rx_over_errors +
3009 stats->rx_crc_errors + stats->rx_frame_errors,
3010 stats->rx_compressed, stats->multicast,
3011 stats->tx_bytes, stats->tx_packets,
3012 stats->tx_errors, stats->tx_dropped,
3013 stats->tx_fifo_errors, stats->collisions,
3014 stats->tx_carrier_errors +
3015 stats->tx_aborted_errors +
3016 stats->tx_window_errors +
3017 stats->tx_heartbeat_errors,
3018 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019}
3020
3021/*
3022 * Called from the PROCfs module. This now uses the new arbitrary sized
3023 * /proc/net interface to create /proc/net/dev
3024 */
3025static int dev_seq_show(struct seq_file *seq, void *v)
3026{
3027 if (v == SEQ_START_TOKEN)
3028 seq_puts(seq, "Inter-| Receive "
3029 " | Transmit\n"
3030 " face |bytes packets errs drop fifo frame "
3031 "compressed multicast|bytes packets errs "
3032 "drop fifo colls carrier compressed\n");
3033 else
3034 dev_seq_printf_stats(seq, v);
3035 return 0;
3036}
3037
3038static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3039{
3040 struct netif_rx_stats *rc = NULL;
3041
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003042 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003043 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 rc = &per_cpu(netdev_rx_stat, *pos);
3045 break;
3046 } else
3047 ++*pos;
3048 return rc;
3049}
3050
3051static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3052{
3053 return softnet_get_online(pos);
3054}
3055
3056static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3057{
3058 ++*pos;
3059 return softnet_get_online(pos);
3060}
3061
3062static void softnet_seq_stop(struct seq_file *seq, void *v)
3063{
3064}
3065
3066static int softnet_seq_show(struct seq_file *seq, void *v)
3067{
3068 struct netif_rx_stats *s = v;
3069
3070 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003071 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003072 0, 0, 0, 0, /* was fastroute */
3073 s->cpu_collision );
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074 return 0;
3075}
3076
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003077static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078 .start = dev_seq_start,
3079 .next = dev_seq_next,
3080 .stop = dev_seq_stop,
3081 .show = dev_seq_show,
3082};
3083
3084static int dev_seq_open(struct inode *inode, struct file *file)
3085{
Denis V. Luneve372c412007-11-19 22:31:54 -08003086 return seq_open_net(inode, file, &dev_seq_ops,
3087 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088}
3089
Arjan van de Ven9a321442007-02-12 00:55:35 -08003090static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 .owner = THIS_MODULE,
3092 .open = dev_seq_open,
3093 .read = seq_read,
3094 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003095 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096};
3097
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003098static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099 .start = softnet_seq_start,
3100 .next = softnet_seq_next,
3101 .stop = softnet_seq_stop,
3102 .show = softnet_seq_show,
3103};
3104
3105static int softnet_seq_open(struct inode *inode, struct file *file)
3106{
3107 return seq_open(file, &softnet_seq_ops);
3108}
3109
Arjan van de Ven9a321442007-02-12 00:55:35 -08003110static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003111 .owner = THIS_MODULE,
3112 .open = softnet_seq_open,
3113 .read = seq_read,
3114 .llseek = seq_lseek,
3115 .release = seq_release,
3116};
3117
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003118static void *ptype_get_idx(loff_t pos)
3119{
3120 struct packet_type *pt = NULL;
3121 loff_t i = 0;
3122 int t;
3123
3124 list_for_each_entry_rcu(pt, &ptype_all, list) {
3125 if (i == pos)
3126 return pt;
3127 ++i;
3128 }
3129
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003130 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003131 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3132 if (i == pos)
3133 return pt;
3134 ++i;
3135 }
3136 }
3137 return NULL;
3138}
3139
3140static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003141 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003142{
3143 rcu_read_lock();
3144 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3145}
3146
3147static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3148{
3149 struct packet_type *pt;
3150 struct list_head *nxt;
3151 int hash;
3152
3153 ++*pos;
3154 if (v == SEQ_START_TOKEN)
3155 return ptype_get_idx(0);
3156
3157 pt = v;
3158 nxt = pt->list.next;
3159 if (pt->type == htons(ETH_P_ALL)) {
3160 if (nxt != &ptype_all)
3161 goto found;
3162 hash = 0;
3163 nxt = ptype_base[0].next;
3164 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003165 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003166
3167 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003168 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003169 return NULL;
3170 nxt = ptype_base[hash].next;
3171 }
3172found:
3173 return list_entry(nxt, struct packet_type, list);
3174}
3175
3176static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003177 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003178{
3179 rcu_read_unlock();
3180}
3181
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003182static int ptype_seq_show(struct seq_file *seq, void *v)
3183{
3184 struct packet_type *pt = v;
3185
3186 if (v == SEQ_START_TOKEN)
3187 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003188 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003189 if (pt->type == htons(ETH_P_ALL))
3190 seq_puts(seq, "ALL ");
3191 else
3192 seq_printf(seq, "%04x", ntohs(pt->type));
3193
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003194 seq_printf(seq, " %-8s %pF\n",
3195 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003196 }
3197
3198 return 0;
3199}
3200
3201static const struct seq_operations ptype_seq_ops = {
3202 .start = ptype_seq_start,
3203 .next = ptype_seq_next,
3204 .stop = ptype_seq_stop,
3205 .show = ptype_seq_show,
3206};
3207
3208static int ptype_seq_open(struct inode *inode, struct file *file)
3209{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003210 return seq_open_net(inode, file, &ptype_seq_ops,
3211 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003212}
3213
3214static const struct file_operations ptype_seq_fops = {
3215 .owner = THIS_MODULE,
3216 .open = ptype_seq_open,
3217 .read = seq_read,
3218 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003219 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003220};
3221
3222
Pavel Emelyanov46650792007-10-08 20:38:39 -07003223static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224{
3225 int rc = -ENOMEM;
3226
Eric W. Biederman881d9662007-09-17 11:56:21 -07003227 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003229 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003231 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003232 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003233
Eric W. Biederman881d9662007-09-17 11:56:21 -07003234 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003235 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236 rc = 0;
3237out:
3238 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003239out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003240 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003242 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003244 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245 goto out;
3246}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003247
Pavel Emelyanov46650792007-10-08 20:38:39 -07003248static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003249{
3250 wext_proc_exit(net);
3251
3252 proc_net_remove(net, "ptype");
3253 proc_net_remove(net, "softnet_stat");
3254 proc_net_remove(net, "dev");
3255}
3256
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003257static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003258 .init = dev_proc_net_init,
3259 .exit = dev_proc_net_exit,
3260};
3261
3262static int __init dev_proc_init(void)
3263{
3264 return register_pernet_subsys(&dev_proc_ops);
3265}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266#else
3267#define dev_proc_init() 0
3268#endif /* CONFIG_PROC_FS */
3269
3270
3271/**
3272 * netdev_set_master - set up master/slave pair
3273 * @slave: slave device
3274 * @master: new master device
3275 *
3276 * Changes the master device of the slave. Pass %NULL to break the
3277 * bonding. The caller must hold the RTNL semaphore. On a failure
3278 * a negative errno code is returned. On success the reference counts
3279 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3280 * function returns zero.
3281 */
3282int netdev_set_master(struct net_device *slave, struct net_device *master)
3283{
3284 struct net_device *old = slave->master;
3285
3286 ASSERT_RTNL();
3287
3288 if (master) {
3289 if (old)
3290 return -EBUSY;
3291 dev_hold(master);
3292 }
3293
3294 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003295
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 synchronize_net();
3297
3298 if (old)
3299 dev_put(old);
3300
3301 if (master)
3302 slave->flags |= IFF_SLAVE;
3303 else
3304 slave->flags &= ~IFF_SLAVE;
3305
3306 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3307 return 0;
3308}
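/*
 * Usage sketch: the bonding driver uses this under RTNL when enslaving
 * and releasing a device; dev and bond_dev below stand in for the slave
 * and the master (illustration only).
 *
 *	ASSERT_RTNL();
 *	err = netdev_set_master(dev, bond_dev);		enslave dev
 *	...
 *	netdev_set_master(dev, NULL);			break the bond
 */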
3309
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003310static void dev_change_rx_flags(struct net_device *dev, int flags)
3311{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003312 const struct net_device_ops *ops = dev->netdev_ops;
3313
3314 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3315 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003316}
3317
Wang Chendad9b332008-06-18 01:48:28 -07003318static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003319{
3320 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003321 uid_t uid;
3322 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003323
Patrick McHardy24023452007-07-14 18:51:31 -07003324 ASSERT_RTNL();
3325
Wang Chendad9b332008-06-18 01:48:28 -07003326 dev->flags |= IFF_PROMISC;
3327 dev->promiscuity += inc;
3328 if (dev->promiscuity == 0) {
3329 /*
3330 * Avoid overflow.
3331 * If inc causes overflow, untouch promisc and return error.
3332 */
3333 if (inc < 0)
3334 dev->flags &= ~IFF_PROMISC;
3335 else {
3336 dev->promiscuity -= inc;
3337 printk(KERN_WARNING "%s: promiscuity touches roof, "
3338 "set promiscuity failed, promiscuity feature "
3339 "of device might be broken.\n", dev->name);
3340 return -EOVERFLOW;
3341 }
3342 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003343 if (dev->flags != old_flags) {
3344 printk(KERN_INFO "device %s %s promiscuous mode\n",
3345 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3346 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003347 if (audit_enabled) {
3348 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003349 audit_log(current->audit_context, GFP_ATOMIC,
3350 AUDIT_ANOM_PROMISCUOUS,
3351 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3352 dev->name, (dev->flags & IFF_PROMISC),
3353 (old_flags & IFF_PROMISC),
3354 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003355 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003356 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003357 }
Patrick McHardy24023452007-07-14 18:51:31 -07003358
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003359 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003360 }
Wang Chendad9b332008-06-18 01:48:28 -07003361 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003362}
3363
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364/**
3365 * dev_set_promiscuity - update promiscuity count on a device
3366 * @dev: device
3367 * @inc: modifier
3368 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003369 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 * remains above zero the interface remains promiscuous. Once it hits zero
3371 * the device reverts back to normal filtering operation. A negative inc
3372 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003373 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 */
Wang Chendad9b332008-06-18 01:48:28 -07003375int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376{
3377 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003378 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379
Wang Chendad9b332008-06-18 01:48:28 -07003380 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003381 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003382 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003383 if (dev->flags != old_flags)
3384 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003385 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386}
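/*
 * Usage sketch: callers such as packet sockets or bridge ports hold a
 * promiscuity reference for as long as they need to see all frames and
 * drop it again afterwards; both calls must run under RTNL
 * (illustration only).
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */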
3387
3388/**
3389 * dev_set_allmulti - update allmulti count on a device
3390 * @dev: device
3391 * @inc: modifier
3392 *
3393 * Add or remove reception of all multicast frames to a device. While the
3394 * count in the device remains above zero the interface remains listening
3395 * to all multicast frames. Once it hits zero the device reverts back to normal
3396 * filtering operation. A negative @inc value is used to drop the counter
3397 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003398 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399 */
3400
Wang Chendad9b332008-06-18 01:48:28 -07003401int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402{
3403 unsigned short old_flags = dev->flags;
3404
Patrick McHardy24023452007-07-14 18:51:31 -07003405 ASSERT_RTNL();
3406
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003408 dev->allmulti += inc;
3409 if (dev->allmulti == 0) {
3410 /*
3411 * Avoid overflow.
3412 * If inc causes overflow, untouch allmulti and return error.
3413 */
3414 if (inc < 0)
3415 dev->flags &= ~IFF_ALLMULTI;
3416 else {
3417 dev->allmulti -= inc;
3418 printk(KERN_WARNING "%s: allmulti touches roof, "
3419 "set allmulti failed, allmulti feature of "
3420 "device might be broken.\n", dev->name);
3421 return -EOVERFLOW;
3422 }
3423 }
Patrick McHardy24023452007-07-14 18:51:31 -07003424 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003425 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003426 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003427 }
Wang Chendad9b332008-06-18 01:48:28 -07003428 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003429}
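/*
 * Usage sketch: a component that needs every multicast frame (a
 * multicast routing helper, say) takes an allmulti reference while it
 * is active and releases it on shutdown; RTNL must be held
 * (illustration only).
 *
 *	ASSERT_RTNL();
 *	err = dev_set_allmulti(dev, 1);
 *	...
 *	dev_set_allmulti(dev, -1);
 */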
3430
3431/*
3432 * Upload unicast and multicast address lists to device and
3433 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003434 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003435 * are present.
3436 */
3437void __dev_set_rx_mode(struct net_device *dev)
3438{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003439 const struct net_device_ops *ops = dev->netdev_ops;
3440
Patrick McHardy4417da62007-06-27 01:28:10 -07003441 /* dev_open will call this function so the list will stay sane. */
3442 if (!(dev->flags&IFF_UP))
3443 return;
3444
3445 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003446 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003447
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003448 if (ops->ndo_set_rx_mode)
3449 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003450 else {
3451 /* Unicast addresses changes may only happen under the rtnl,
3452 * therefore calling __dev_set_promiscuity here is safe.
3453 */
3454 if (dev->uc_count > 0 && !dev->uc_promisc) {
3455 __dev_set_promiscuity(dev, 1);
3456 dev->uc_promisc = 1;
3457 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3458 __dev_set_promiscuity(dev, -1);
3459 dev->uc_promisc = 0;
3460 }
3461
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003462 if (ops->ndo_set_multicast_list)
3463 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003464 }
3465}
3466
3467void dev_set_rx_mode(struct net_device *dev)
3468{
David S. Millerb9e40852008-07-15 00:15:08 -07003469 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003470 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003471 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472}
3473
Jiri Pirkof001fde2009-05-05 02:48:28 +00003474/* hw addresses list handling functions */
3475
3476static int __hw_addr_add(struct list_head *list, unsigned char *addr,
3477 int addr_len, unsigned char addr_type)
3478{
3479 struct netdev_hw_addr *ha;
3480 int alloc_size;
3481
3482 if (addr_len > MAX_ADDR_LEN)
3483 return -EINVAL;
3484
3485 alloc_size = sizeof(*ha);
3486 if (alloc_size < L1_CACHE_BYTES)
3487 alloc_size = L1_CACHE_BYTES;
3488 ha = kmalloc(alloc_size, GFP_ATOMIC);
3489 if (!ha)
3490 return -ENOMEM;
3491 memcpy(ha->addr, addr, addr_len);
3492 ha->type = addr_type;
3493 list_add_tail_rcu(&ha->list, list);
3494 return 0;
3495}
3496
3497static void ha_rcu_free(struct rcu_head *head)
3498{
3499 struct netdev_hw_addr *ha;
3500
3501 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3502 kfree(ha);
3503}
3504
3505static int __hw_addr_del_ii(struct list_head *list, unsigned char *addr,
3506 int addr_len, unsigned char addr_type,
3507 int ignore_index)
3508{
3509 struct netdev_hw_addr *ha;
3510 int i = 0;
3511
3512 list_for_each_entry(ha, list, list) {
3513 if (i++ != ignore_index &&
3514 !memcmp(ha->addr, addr, addr_len) &&
3515 (ha->type == addr_type || !addr_type)) {
3516 list_del_rcu(&ha->list);
3517 call_rcu(&ha->rcu_head, ha_rcu_free);
3518 return 0;
3519 }
3520 }
3521 return -ENOENT;
3522}
3523
3524static int __hw_addr_add_multiple_ii(struct list_head *to_list,
3525 struct list_head *from_list,
3526 int addr_len, unsigned char addr_type,
3527 int ignore_index)
3528{
3529 int err;
3530 struct netdev_hw_addr *ha, *ha2;
3531 unsigned char type;
3532
3533 list_for_each_entry(ha, from_list, list) {
3534 type = addr_type ? addr_type : ha->type;
3535 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
3536 if (err)
3537 goto unroll;
3538 }
3539 return 0;
3540
3541unroll:
3542 list_for_each_entry(ha2, from_list, list) {
3543 if (ha2 == ha)
3544 break;
3545 type = addr_type ? addr_type : ha2->type;
3546 __hw_addr_del_ii(to_list, ha2->addr, addr_len, type,
3547 ignore_index);
3548 }
3549 return err;
3550}
3551
3552static void __hw_addr_del_multiple_ii(struct list_head *to_list,
3553 struct list_head *from_list,
3554 int addr_len, unsigned char addr_type,
3555 int ignore_index)
3556{
3557 struct netdev_hw_addr *ha;
3558 unsigned char type;
3559
3560 list_for_each_entry(ha, from_list, list) {
3561 type = addr_type ? addr_type : ha->type;
3562		__hw_addr_del_ii(to_list, ha->addr, addr_len, type,
3563 ignore_index);
3564 }
3565}
3566
3567static void __hw_addr_flush(struct list_head *list)
3568{
3569 struct netdev_hw_addr *ha, *tmp;
3570
3571 list_for_each_entry_safe(ha, tmp, list, list) {
3572 list_del_rcu(&ha->list);
3573 call_rcu(&ha->rcu_head, ha_rcu_free);
3574 }
3575}
3576
3577/* Device addresses handling functions */
3578
3579static void dev_addr_flush(struct net_device *dev)
3580{
3581 /* rtnl_mutex must be held here */
3582
3583 __hw_addr_flush(&dev->dev_addr_list);
3584 dev->dev_addr = NULL;
3585}
3586
3587static int dev_addr_init(struct net_device *dev)
3588{
3589 unsigned char addr[MAX_ADDR_LEN];
3590 struct netdev_hw_addr *ha;
3591 int err;
3592
3593 /* rtnl_mutex must be held here */
3594
3595 INIT_LIST_HEAD(&dev->dev_addr_list);
3596 memset(addr, 0, sizeof(*addr));
3597 err = __hw_addr_add(&dev->dev_addr_list, addr, sizeof(*addr),
3598 NETDEV_HW_ADDR_T_LAN);
3599 if (!err) {
3600 /*
3601 * Get the first (previously created) address from the list
3602 * and set dev_addr pointer to this location.
3603 */
3604 ha = list_first_entry(&dev->dev_addr_list,
3605 struct netdev_hw_addr, list);
3606 dev->dev_addr = ha->addr;
3607 }
3608 return err;
3609}
3610
3611/**
3612 * dev_addr_add - Add a device address
3613 * @dev: device
3614 * @addr: address to add
3615 * @addr_type: address type
3616 *
3617 * Add a device address to the device or increase the reference count if
3618 * it already exists.
3619 *
3620 * The caller must hold the rtnl_mutex.
3621 */
3622int dev_addr_add(struct net_device *dev, unsigned char *addr,
3623 unsigned char addr_type)
3624{
3625 int err;
3626
3627 ASSERT_RTNL();
3628
3629 err = __hw_addr_add(&dev->dev_addr_list, addr, dev->addr_len,
3630 addr_type);
3631 if (!err)
3632 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3633 return err;
3634}
3635EXPORT_SYMBOL(dev_addr_add);
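/*
 * Usage sketch (illustrative; @addr is assumed to hold dev->addr_len
 * valid bytes): add a secondary hardware address and release it again
 * under RTNL. NETDEV_HW_ADDR_T_LAN is the type this file uses for
 * ordinary LAN addresses.
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, addr, NETDEV_HW_ADDR_T_LAN);
 *	...
 *	if (!err)
 *		dev_addr_del(dev, addr, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 */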
3636
3637/**
3638 * dev_addr_del - Release a device address.
3639 * @dev: device
3640 * @addr: address to delete
3641 * @addr_type: address type
3642 *
3643 * Release reference to a device address and remove it from the device
3644 * if the reference count drops to zero.
3645 *
3646 * The caller must hold the rtnl_mutex.
3647 */
3648int dev_addr_del(struct net_device *dev, unsigned char *addr,
3649 unsigned char addr_type)
3650{
3651 int err;
3652
3653 ASSERT_RTNL();
3654
3655 err = __hw_addr_del_ii(&dev->dev_addr_list, addr, dev->addr_len,
3656 addr_type, 0);
3657 if (!err)
3658 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3659 return err;
3660}
3661EXPORT_SYMBOL(dev_addr_del);
3662
3663/**
3664 * dev_addr_add_multiple - Add device addresses from another device
3665 * @to_dev: device to which addresses will be added
3666 * @from_dev: device from which addresses will be added
3667 * @addr_type: address type - 0 means type will be used from from_dev
3668 *
3669 * Add the device addresses of one device to another.
3670 *
3671 * The caller must hold the rtnl_mutex.
3672 */
3673int dev_addr_add_multiple(struct net_device *to_dev,
3674 struct net_device *from_dev,
3675 unsigned char addr_type)
3676{
3677 int err;
3678
3679 ASSERT_RTNL();
3680
3681 if (from_dev->addr_len != to_dev->addr_len)
3682 return -EINVAL;
3683 err = __hw_addr_add_multiple_ii(&to_dev->dev_addr_list,
3684 &from_dev->dev_addr_list,
3685 to_dev->addr_len, addr_type, 0);
3686 if (!err)
3687 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3688 return err;
3689}
3690EXPORT_SYMBOL(dev_addr_add_multiple);
3691
3692/**
3693 * dev_addr_del_multiple - Delete device addresses by another device
3694 * @to_dev: device where the addresses will be deleted
3695 * @from_dev: device supplying the addresses to be deleted
3696 * @addr_type: address type - 0 means type will be used from from_dev
3697 *
3698 * Deletes the addresses in @to_dev that are listed in @from_dev.
3699 *
3700 * The caller must hold the rtnl_mutex.
3701 */
3702int dev_addr_del_multiple(struct net_device *to_dev,
3703 struct net_device *from_dev,
3704 unsigned char addr_type)
3705{
3706 ASSERT_RTNL();
3707
3708 if (from_dev->addr_len != to_dev->addr_len)
3709 return -EINVAL;
3710 __hw_addr_del_multiple_ii(&to_dev->dev_addr_list,
3711 &from_dev->dev_addr_list,
3712 to_dev->addr_len, addr_type, 0);
3713 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3714 return 0;
3715}
3716EXPORT_SYMBOL(dev_addr_del_multiple);
3717
3718/* unicast and multicast addresses handling functions */
3719
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003720int __dev_addr_delete(struct dev_addr_list **list, int *count,
3721 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003722{
3723 struct dev_addr_list *da;
3724
3725 for (; (da = *list) != NULL; list = &da->next) {
3726 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3727 alen == da->da_addrlen) {
3728 if (glbl) {
3729 int old_glbl = da->da_gusers;
3730 da->da_gusers = 0;
3731 if (old_glbl == 0)
3732 break;
3733 }
3734 if (--da->da_users)
3735 return 0;
3736
3737 *list = da->next;
3738 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003739 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003740 return 0;
3741 }
3742 }
3743 return -ENOENT;
3744}
3745
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003746int __dev_addr_add(struct dev_addr_list **list, int *count,
3747 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003748{
3749 struct dev_addr_list *da;
3750
3751 for (da = *list; da != NULL; da = da->next) {
3752 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3753 da->da_addrlen == alen) {
3754 if (glbl) {
3755 int old_glbl = da->da_gusers;
3756 da->da_gusers = 1;
3757 if (old_glbl)
3758 return 0;
3759 }
3760 da->da_users++;
3761 return 0;
3762 }
3763 }
3764
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003765 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003766 if (da == NULL)
3767 return -ENOMEM;
3768 memcpy(da->da_addr, addr, alen);
3769 da->da_addrlen = alen;
3770 da->da_users = 1;
3771 da->da_gusers = glbl ? 1 : 0;
3772 da->next = *list;
3773 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003774 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003775 return 0;
3776}
3777
Patrick McHardy4417da62007-06-27 01:28:10 -07003778/**
3779 * dev_unicast_delete - Release secondary unicast address.
3780 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003781 * @addr: address to delete
3782 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003783 *
3784 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003785 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003786 *
3787 * The caller must hold the rtnl_mutex.
3788 */
3789int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3790{
3791 int err;
3792
3793 ASSERT_RTNL();
3794
David S. Millerb9e40852008-07-15 00:15:08 -07003795 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003796 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3797 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003798 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003799 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003800 return err;
3801}
3802EXPORT_SYMBOL(dev_unicast_delete);
3803
3804/**
3805 * dev_unicast_add - add a secondary unicast address
3806 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003807 * @addr: address to add
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003808 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003809 *
3810 * Add a secondary unicast address to the device or increase
3811 * the reference count if it already exists.
3812 *
3813 * The caller must hold the rtnl_mutex.
3814 */
3815int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3816{
3817 int err;
3818
3819 ASSERT_RTNL();
3820
David S. Millerb9e40852008-07-15 00:15:08 -07003821 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003822 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3823 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003824 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003825 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003826 return err;
3827}
3828EXPORT_SYMBOL(dev_unicast_add);
3829
Chris Leeche83a2ea2008-01-31 16:53:23 -08003830int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3831 struct dev_addr_list **from, int *from_count)
3832{
3833 struct dev_addr_list *da, *next;
3834 int err = 0;
3835
3836 da = *from;
3837 while (da != NULL) {
3838 next = da->next;
3839 if (!da->da_synced) {
3840 err = __dev_addr_add(to, to_count,
3841 da->da_addr, da->da_addrlen, 0);
3842 if (err < 0)
3843 break;
3844 da->da_synced = 1;
3845 da->da_users++;
3846 } else if (da->da_users == 1) {
3847 __dev_addr_delete(to, to_count,
3848 da->da_addr, da->da_addrlen, 0);
3849 __dev_addr_delete(from, from_count,
3850 da->da_addr, da->da_addrlen, 0);
3851 }
3852 da = next;
3853 }
3854 return err;
3855}
3856
3857void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3858 struct dev_addr_list **from, int *from_count)
3859{
3860 struct dev_addr_list *da, *next;
3861
3862 da = *from;
3863 while (da != NULL) {
3864 next = da->next;
3865 if (da->da_synced) {
3866 __dev_addr_delete(to, to_count,
3867 da->da_addr, da->da_addrlen, 0);
3868 da->da_synced = 0;
3869 __dev_addr_delete(from, from_count,
3870 da->da_addr, da->da_addrlen, 0);
3871 }
3872 da = next;
3873 }
3874}
3875
3876/**
3877 * dev_unicast_sync - Synchronize device's unicast list to another device
3878 * @to: destination device
3879 * @from: source device
3880 *
3881 * Add newly added addresses to the destination device and release
3882 * addresses that have no users left. The source device must be
3883 * locked by netif_addr_lock_bh.
3884 *
3885 * This function is intended to be called from the dev->set_rx_mode
3886 * function of layered software devices.
3887 */
3888int dev_unicast_sync(struct net_device *to, struct net_device *from)
3889{
3890 int err = 0;
3891
David S. Millerb9e40852008-07-15 00:15:08 -07003892 netif_addr_lock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003893 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3894 &from->uc_list, &from->uc_count);
3895 if (!err)
3896 __dev_set_rx_mode(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003897 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003898 return err;
3899}
3900EXPORT_SYMBOL(dev_unicast_sync);
3901
3902/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003903 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003904 * @to: destination device
3905 * @from: source device
3906 *
3907 * Remove all addresses that were added to the destination device by
3908 * dev_unicast_sync(). This function is intended to be called from the
3909 * dev->stop function of layered software devices.
3910 */
3911void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3912{
David S. Millerb9e40852008-07-15 00:15:08 -07003913 netif_addr_lock_bh(from);
David S. Millere308a5d2008-07-15 00:13:44 -07003914 netif_addr_lock(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003915
3916 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3917 &from->uc_list, &from->uc_count);
3918 __dev_set_rx_mode(to);
3919
David S. Millere308a5d2008-07-15 00:13:44 -07003920 netif_addr_unlock(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003921 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003922}
3923EXPORT_SYMBOL(dev_unicast_unsync);
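/*
 * Usage sketch: a layered software device (a VLAN-like driver, say)
 * propagates its own secondary unicast list to the underlying real
 * device from its rx_mode handler and removes those entries again when
 * it is stopped. foo_priv() and ->lowerdev are hypothetical.
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		dev_unicast_sync(foo_priv(dev)->lowerdev, dev);
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		dev_unicast_unsync(foo_priv(dev)->lowerdev, dev);
 *		return 0;
 *	}
 */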
3924
Denis Cheng12972622007-07-18 02:12:56 -07003925static void __dev_addr_discard(struct dev_addr_list **list)
3926{
3927 struct dev_addr_list *tmp;
3928
3929 while (*list != NULL) {
3930 tmp = *list;
3931 *list = tmp->next;
3932 if (tmp->da_users > tmp->da_gusers)
3933 printk("__dev_addr_discard: address leakage! "
3934 "da_users=%d\n", tmp->da_users);
3935 kfree(tmp);
3936 }
3937}
3938
Denis Cheng26cc2522007-07-18 02:12:03 -07003939static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07003940{
David S. Millerb9e40852008-07-15 00:15:08 -07003941 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07003942
Patrick McHardy4417da62007-06-27 01:28:10 -07003943 __dev_addr_discard(&dev->uc_list);
3944 dev->uc_count = 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003945
Denis Cheng456ad752007-07-18 02:10:54 -07003946 __dev_addr_discard(&dev->mc_list);
3947 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07003948
David S. Millerb9e40852008-07-15 00:15:08 -07003949 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07003950}
3951
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07003952/**
3953 * dev_get_flags - get flags reported to userspace
3954 * @dev: device
3955 *
3956 * Get the combination of flag bits exported through APIs to userspace.
3957 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958unsigned dev_get_flags(const struct net_device *dev)
3959{
3960 unsigned flags;
3961
3962 flags = (dev->flags & ~(IFF_PROMISC |
3963 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08003964 IFF_RUNNING |
3965 IFF_LOWER_UP |
3966 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07003967 (dev->gflags & (IFF_PROMISC |
3968 IFF_ALLMULTI));
3969
Stefan Rompfb00055a2006-03-20 17:09:11 -08003970 if (netif_running(dev)) {
3971 if (netif_oper_up(dev))
3972 flags |= IFF_RUNNING;
3973 if (netif_carrier_ok(dev))
3974 flags |= IFF_LOWER_UP;
3975 if (netif_dormant(dev))
3976 flags |= IFF_DORMANT;
3977 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978
3979 return flags;
3980}
3981
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07003982/**
3983 * dev_change_flags - change device settings
3984 * @dev: device
3985 * @flags: device state flags
3986 *
3987 * Change settings on device based state flags. The flags are
3988 * in the userspace exported format.
3989 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990int dev_change_flags(struct net_device *dev, unsigned flags)
3991{
Thomas Graf7c355f52007-06-05 16:03:03 -07003992 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993 int old_flags = dev->flags;
3994
Patrick McHardy24023452007-07-14 18:51:31 -07003995 ASSERT_RTNL();
3996
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997 /*
3998 * Set the flags on our device.
3999 */
4000
4001 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4002 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4003 IFF_AUTOMEDIA)) |
4004 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4005 IFF_ALLMULTI));
4006
4007 /*
4008 * Load in the correct multicast list now the flags have changed.
4009 */
4010
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004011 if ((old_flags ^ flags) & IFF_MULTICAST)
4012 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004013
Patrick McHardy4417da62007-06-27 01:28:10 -07004014 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015
4016 /*
4017	 * Have we downed the interface? We handle IFF_UP ourselves
4018 * according to user attempts to set it, rather than blindly
4019 * setting it.
4020 */
4021
4022 ret = 0;
4023 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4024 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4025
4026 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004027 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028 }
4029
4030 if (dev->flags & IFF_UP &&
4031 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4032 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004033 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004034
4035 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4036 int inc = (flags & IFF_PROMISC) ? +1 : -1;
4037 dev->gflags ^= IFF_PROMISC;
4038 dev_set_promiscuity(dev, inc);
4039 }
4040
4041 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4042	   is important. Some (broken) drivers set IFF_PROMISC when
4043	   IFF_ALLMULTI is requested, without asking us and without reporting it.
4044 */
4045 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4046 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
4047 dev->gflags ^= IFF_ALLMULTI;
4048 dev_set_allmulti(dev, inc);
4049 }
4050
Thomas Graf7c355f52007-06-05 16:03:03 -07004051 /* Exclude state transition flags, already notified */
4052 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4053 if (changes)
4054 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055
4056 return ret;
4057}
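/*
 * Usage sketch: bring an interface up from kernel code by editing the
 * userspace-visible flag set under RTNL, mirroring the SIOCGIFFLAGS /
 * SIOCSIFFLAGS ioctl pair handled further down in this file
 * (illustration only).
 *
 *	rtnl_lock();
 *	flags = dev_get_flags(dev);
 *	err = dev_change_flags(dev, flags | IFF_UP);
 *	rtnl_unlock();
 */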
4058
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004059/**
4060 * dev_set_mtu - Change maximum transfer unit
4061 * @dev: device
4062 * @new_mtu: new transfer unit
4063 *
4064 * Change the maximum transfer size of the network device.
4065 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066int dev_set_mtu(struct net_device *dev, int new_mtu)
4067{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004068 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069 int err;
4070
4071 if (new_mtu == dev->mtu)
4072 return 0;
4073
4074 /* MTU must be positive. */
4075 if (new_mtu < 0)
4076 return -EINVAL;
4077
4078 if (!netif_device_present(dev))
4079 return -ENODEV;
4080
4081 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004082 if (ops->ndo_change_mtu)
4083 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084 else
4085 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004086
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004088 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089 return err;
4090}
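/*
 * Usage sketch: change the MTU from kernel code the same way the
 * SIOCSIFMTU branch of dev_ifsioc() below does; callers normally hold
 * RTNL (illustration only, 9000 is just an example value).
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */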
4091
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004092/**
4093 * dev_set_mac_address - Change Media Access Control Address
4094 * @dev: device
4095 * @sa: new address
4096 *
4097 * Change the hardware (MAC) address of the device
4098 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4100{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004101 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102 int err;
4103
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004104 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105 return -EOPNOTSUPP;
4106 if (sa->sa_family != dev->type)
4107 return -EINVAL;
4108 if (!netif_device_present(dev))
4109 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004110 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004112 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004113 return err;
4114}
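/*
 * Usage sketch: set a new hardware address, as the SIOCSIFHWADDR branch
 * of dev_ifsioc() below does. new_addr is assumed to hold dev->addr_len
 * valid bytes and RTNL is assumed to be held (illustration only).
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */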
4115
4116/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07004117 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004118 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004119static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120{
4121 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004122 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004123
4124 if (!dev)
4125 return -ENODEV;
4126
4127 switch (cmd) {
4128 case SIOCGIFFLAGS: /* Get interface flags */
4129 ifr->ifr_flags = dev_get_flags(dev);
4130 return 0;
4131
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132 case SIOCGIFMETRIC: /* Get the metric on the interface
4133 (currently unused) */
4134 ifr->ifr_metric = 0;
4135 return 0;
4136
Linus Torvalds1da177e2005-04-16 15:20:36 -07004137 case SIOCGIFMTU: /* Get the MTU of a device */
4138 ifr->ifr_mtu = dev->mtu;
4139 return 0;
4140
Linus Torvalds1da177e2005-04-16 15:20:36 -07004141 case SIOCGIFHWADDR:
4142 if (!dev->addr_len)
4143 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4144 else
4145 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4146 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4147 ifr->ifr_hwaddr.sa_family = dev->type;
4148 return 0;
4149
Jeff Garzik14e3e072007-10-08 00:06:32 -07004150 case SIOCGIFSLAVE:
4151 err = -EINVAL;
4152 break;
4153
4154 case SIOCGIFMAP:
4155 ifr->ifr_map.mem_start = dev->mem_start;
4156 ifr->ifr_map.mem_end = dev->mem_end;
4157 ifr->ifr_map.base_addr = dev->base_addr;
4158 ifr->ifr_map.irq = dev->irq;
4159 ifr->ifr_map.dma = dev->dma;
4160 ifr->ifr_map.port = dev->if_port;
4161 return 0;
4162
4163 case SIOCGIFINDEX:
4164 ifr->ifr_ifindex = dev->ifindex;
4165 return 0;
4166
4167 case SIOCGIFTXQLEN:
4168 ifr->ifr_qlen = dev->tx_queue_len;
4169 return 0;
4170
4171 default:
4172 /* dev_ioctl() should ensure this case
4173 * is never reached
4174 */
4175 WARN_ON(1);
4176 err = -EINVAL;
4177 break;
4178
4179 }
4180 return err;
4181}
4182
4183/*
4184 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4185 */
4186static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4187{
4188 int err;
4189 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004190 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004191
4192 if (!dev)
4193 return -ENODEV;
4194
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004195 ops = dev->netdev_ops;
4196
Jeff Garzik14e3e072007-10-08 00:06:32 -07004197 switch (cmd) {
4198 case SIOCSIFFLAGS: /* Set interface flags */
4199 return dev_change_flags(dev, ifr->ifr_flags);
4200
4201 case SIOCSIFMETRIC: /* Set the metric on the interface
4202 (currently unused) */
4203 return -EOPNOTSUPP;
4204
4205 case SIOCSIFMTU: /* Set the MTU of a device */
4206 return dev_set_mtu(dev, ifr->ifr_mtu);
4207
Linus Torvalds1da177e2005-04-16 15:20:36 -07004208 case SIOCSIFHWADDR:
4209 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4210
4211 case SIOCSIFHWBROADCAST:
4212 if (ifr->ifr_hwaddr.sa_family != dev->type)
4213 return -EINVAL;
4214 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4215 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004216 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217 return 0;
4218
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219 case SIOCSIFMAP:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004220 if (ops->ndo_set_config) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221 if (!netif_device_present(dev))
4222 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004223 return ops->ndo_set_config(dev, &ifr->ifr_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224 }
4225 return -EOPNOTSUPP;
4226
4227 case SIOCADDMULTI:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004228 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4230 return -EINVAL;
4231 if (!netif_device_present(dev))
4232 return -ENODEV;
4233 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4234 dev->addr_len, 1);
4235
4236 case SIOCDELMULTI:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004237 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4239 return -EINVAL;
4240 if (!netif_device_present(dev))
4241 return -ENODEV;
4242 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4243 dev->addr_len, 1);
4244
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245 case SIOCSIFTXQLEN:
4246 if (ifr->ifr_qlen < 0)
4247 return -EINVAL;
4248 dev->tx_queue_len = ifr->ifr_qlen;
4249 return 0;
4250
4251 case SIOCSIFNAME:
4252 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4253 return dev_change_name(dev, ifr->ifr_newname);
4254
4255 /*
4256 * Unknown or private ioctl
4257 */
4258
4259 default:
4260 if ((cmd >= SIOCDEVPRIVATE &&
4261 cmd <= SIOCDEVPRIVATE + 15) ||
4262 cmd == SIOCBONDENSLAVE ||
4263 cmd == SIOCBONDRELEASE ||
4264 cmd == SIOCBONDSETHWADDR ||
4265 cmd == SIOCBONDSLAVEINFOQUERY ||
4266 cmd == SIOCBONDINFOQUERY ||
4267 cmd == SIOCBONDCHANGEACTIVE ||
4268 cmd == SIOCGMIIPHY ||
4269 cmd == SIOCGMIIREG ||
4270 cmd == SIOCSMIIREG ||
4271 cmd == SIOCBRADDIF ||
4272 cmd == SIOCBRDELIF ||
Patrick Ohlyd24fff22009-02-12 05:03:40 +00004273 cmd == SIOCSHWTSTAMP ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274 cmd == SIOCWANDEV) {
4275 err = -EOPNOTSUPP;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004276 if (ops->ndo_do_ioctl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004277 if (netif_device_present(dev))
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004278 err = ops->ndo_do_ioctl(dev, ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279 else
4280 err = -ENODEV;
4281 }
4282 } else
4283 err = -EINVAL;
4284
4285 }
4286 return err;
4287}
4288
4289/*
4290 * This function handles all "interface"-type I/O control requests. The actual
4291 * 'doing' part of this is dev_ifsioc above.
4292 */
4293
4294/**
4295 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004296 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297 * @cmd: command to issue
4298 * @arg: pointer to a struct ifreq in user space
4299 *
4300 * Issue ioctl functions to devices. This is normally called by the
4301 * user space syscall interfaces but can sometimes be useful for
4302 * other purposes. The return value is the return from the syscall if
4303 * positive or a negative errno code on error.
4304 */
4305
Eric W. Biederman881d9662007-09-17 11:56:21 -07004306int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307{
4308 struct ifreq ifr;
4309 int ret;
4310 char *colon;
4311
4312	/* One special case: SIOCGIFCONF takes an ifconf argument
4313	   and requires a shared lock, because it sleeps while writing
4314	   to user space.
4315 */
4316
4317 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004318 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004319 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004320 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321 return ret;
4322 }
4323 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004324 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325
4326 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4327 return -EFAULT;
4328
4329 ifr.ifr_name[IFNAMSIZ-1] = 0;
4330
4331 colon = strchr(ifr.ifr_name, ':');
4332 if (colon)
4333 *colon = 0;
4334
4335 /*
4336 * See which interface the caller is talking about.
4337 */
4338
4339 switch (cmd) {
4340 /*
4341 * These ioctl calls:
4342 * - can be done by all.
4343 * - atomic and do not require locking.
4344 * - return a value
4345 */
4346 case SIOCGIFFLAGS:
4347 case SIOCGIFMETRIC:
4348 case SIOCGIFMTU:
4349 case SIOCGIFHWADDR:
4350 case SIOCGIFSLAVE:
4351 case SIOCGIFMAP:
4352 case SIOCGIFINDEX:
4353 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004354 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355 read_lock(&dev_base_lock);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004356 ret = dev_ifsioc_locked(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357 read_unlock(&dev_base_lock);
4358 if (!ret) {
4359 if (colon)
4360 *colon = ':';
4361 if (copy_to_user(arg, &ifr,
4362 sizeof(struct ifreq)))
4363 ret = -EFAULT;
4364 }
4365 return ret;
4366
4367 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004368 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004370 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371 rtnl_unlock();
4372 if (!ret) {
4373 if (colon)
4374 *colon = ':';
4375 if (copy_to_user(arg, &ifr,
4376 sizeof(struct ifreq)))
4377 ret = -EFAULT;
4378 }
4379 return ret;
4380
4381 /*
4382 * These ioctl calls:
4383 * - require superuser power.
4384 * - require strict serialization.
4385 * - return a value
4386 */
4387 case SIOCGMIIPHY:
4388 case SIOCGMIIREG:
4389 case SIOCSIFNAME:
4390 if (!capable(CAP_NET_ADMIN))
4391 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004392 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004394 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004395 rtnl_unlock();
4396 if (!ret) {
4397 if (colon)
4398 *colon = ':';
4399 if (copy_to_user(arg, &ifr,
4400 sizeof(struct ifreq)))
4401 ret = -EFAULT;
4402 }
4403 return ret;
4404
4405 /*
4406 * These ioctl calls:
4407 * - require superuser power.
4408 * - require strict serialization.
4409 * - do not return a value
4410 */
4411 case SIOCSIFFLAGS:
4412 case SIOCSIFMETRIC:
4413 case SIOCSIFMTU:
4414 case SIOCSIFMAP:
4415 case SIOCSIFHWADDR:
4416 case SIOCSIFSLAVE:
4417 case SIOCADDMULTI:
4418 case SIOCDELMULTI:
4419 case SIOCSIFHWBROADCAST:
4420 case SIOCSIFTXQLEN:
4421 case SIOCSMIIREG:
4422 case SIOCBONDENSLAVE:
4423 case SIOCBONDRELEASE:
4424 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004425 case SIOCBONDCHANGEACTIVE:
4426 case SIOCBRADDIF:
4427 case SIOCBRDELIF:
Patrick Ohlyd24fff22009-02-12 05:03:40 +00004428 case SIOCSHWTSTAMP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004429 if (!capable(CAP_NET_ADMIN))
4430 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08004431 /* fall through */
4432 case SIOCBONDSLAVEINFOQUERY:
4433 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004434 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004436 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437 rtnl_unlock();
4438 return ret;
4439
4440 case SIOCGIFMEM:
4441 /* Get the per device memory space. We can add this but
4442 * currently do not support it */
4443 case SIOCSIFMEM:
4444 /* Set the per device memory buffer space.
4445 * Not applicable in our case */
4446 case SIOCSIFLINK:
4447 return -EINVAL;
4448
4449 /*
4450 * Unknown or private ioctl.
4451 */
4452 default:
4453 if (cmd == SIOCWANDEV ||
4454 (cmd >= SIOCDEVPRIVATE &&
4455 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004456 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004458 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459 rtnl_unlock();
4460 if (!ret && copy_to_user(arg, &ifr,
4461 sizeof(struct ifreq)))
4462 ret = -EFAULT;
4463 return ret;
4464 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07004466 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004467 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004468 return -EINVAL;
4469 }
4470}
4471
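/*
 * Example (illustrative, user space): these requests normally reach
 * dev_ioctl() through ioctl() on an ordinary socket.  A minimal sketch,
 * assuming an interface named "eth0" exists:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int get_mtu(void)
 *	{
 *		struct ifreq ifr;
 *		int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ret = ioctl(fd, SIOCGIFMTU, &ifr);
 *		close(fd);
 *		return ret < 0 ? -1 : ifr.ifr_mtu;
 *	}
 */
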
4472
4473/**
4474 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004475 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 *
4477 * Returns a suitable unique value for a new device interface
4478 * number. The caller must hold the rtnl semaphore or the
4479 * dev_base_lock to be sure it remains unique.
4480 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004481static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004482{
4483 static int ifindex;
4484 for (;;) {
4485 if (++ifindex <= 0)
4486 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004487 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004488 return ifindex;
4489 }
4490}
4491
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004493static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004495static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498}
4499
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004500static void rollback_registered(struct net_device *dev)
4501{
4502 BUG_ON(dev_boot_phase);
4503 ASSERT_RTNL();
4504
4506	/* Some devices call this without having registered, for initialization unwind. */
4506 if (dev->reg_state == NETREG_UNINITIALIZED) {
4507 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4508 "was registered\n", dev->name, dev);
4509
4510 WARN_ON(1);
4511 return;
4512 }
4513
4514 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4515
4516 /* If device is running, close it first. */
4517 dev_close(dev);
4518
4519 /* And unlink it from device chain. */
4520 unlist_netdevice(dev);
4521
4522 dev->reg_state = NETREG_UNREGISTERING;
4523
4524 synchronize_net();
4525
4526 /* Shutdown queueing discipline. */
4527 dev_shutdown(dev);
4528
4529
4530	/* Notify protocols that we are about to destroy
4531 this device. They should clean all the things.
4532 */
4533 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4534
4535 /*
4536 * Flush the unicast and multicast chains
4537 */
4538 dev_addr_discard(dev);
4539
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004540 if (dev->netdev_ops->ndo_uninit)
4541 dev->netdev_ops->ndo_uninit(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004542
4543 /* Notifier chain MUST detach us from master device. */
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004544 WARN_ON(dev->master);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004545
4546 /* Remove entries from kobject tree */
4547 netdev_unregister_kobject(dev);
4548
4549 synchronize_net();
4550
4551 dev_put(dev);
4552}
4553
David S. Millere8a04642008-07-17 00:34:19 -07004554static void __netdev_init_queue_locks_one(struct net_device *dev,
4555 struct netdev_queue *dev_queue,
4556 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004557{
4558 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004559 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004560 dev_queue->xmit_lock_owner = -1;
4561}
4562
4563static void netdev_init_queue_locks(struct net_device *dev)
4564{
David S. Millere8a04642008-07-17 00:34:19 -07004565 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4566 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004567}
4568
Herbert Xub63365a2008-10-23 01:11:29 -07004569unsigned long netdev_fix_features(unsigned long features, const char *name)
4570{
4571 /* Fix illegal SG+CSUM combinations. */
4572 if ((features & NETIF_F_SG) &&
4573 !(features & NETIF_F_ALL_CSUM)) {
4574 if (name)
4575 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4576 "checksum feature.\n", name);
4577 features &= ~NETIF_F_SG;
4578 }
4579
4580 /* TSO requires that SG is present as well. */
4581 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4582 if (name)
4583 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4584 "SG feature.\n", name);
4585 features &= ~NETIF_F_TSO;
4586 }
4587
4588 if (features & NETIF_F_UFO) {
4589 if (!(features & NETIF_F_GEN_CSUM)) {
4590 if (name)
4591 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4592 "since no NETIF_F_HW_CSUM feature.\n",
4593 name);
4594 features &= ~NETIF_F_UFO;
4595 }
4596
4597 if (!(features & NETIF_F_SG)) {
4598 if (name)
4599 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4600 "since no NETIF_F_SG feature.\n", name);
4601 features &= ~NETIF_F_UFO;
4602 }
4603 }
4604
4605 return features;
4606}
4607EXPORT_SYMBOL(netdev_fix_features);
4608
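/*
 * Example (hypothetical driver code): a stacked device folding the
 * features of a lower device into its own and letting
 * netdev_fix_features() drop illegal combinations before applying them:
 *
 *	unsigned long features = upper->features & lower->features;
 *
 *	upper->features = netdev_fix_features(features, upper->name);
 */
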
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609/**
4610 * register_netdevice - register a network device
4611 * @dev: device to register
4612 *
4613 * Take a completed network device structure and add it to the kernel
4614 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4615 * chain. 0 is returned on success. A negative errno code is returned
4616 * on a failure to set up the device, or if the name is a duplicate.
4617 *
4618 * Callers must hold the rtnl semaphore. You may want
4619 * register_netdev() instead of this.
4620 *
4621 * BUGS:
4622 * The locking appears insufficient to guarantee two parallel registers
4623 * will not get the same name.
4624 */
4625
4626int register_netdevice(struct net_device *dev)
4627{
4628 struct hlist_head *head;
4629 struct hlist_node *p;
4630 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004631 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632
4633 BUG_ON(dev_boot_phase);
4634 ASSERT_RTNL();
4635
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004636 might_sleep();
4637
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638 /* When net_device's are persistent, this will be fatal. */
4639 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004640 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641
David S. Millerf1f28aa2008-07-15 00:08:33 -07004642 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004643 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004644 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004645
Linus Torvalds1da177e2005-04-16 15:20:36 -07004646 dev->iflink = -1;
4647
4648 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004649 if (dev->netdev_ops->ndo_init) {
4650 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004651 if (ret) {
4652 if (ret > 0)
4653 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004654 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004655 }
4656 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004657
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658 if (!dev_valid_name(dev->name)) {
4659 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004660 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661 }
4662
Eric W. Biederman881d9662007-09-17 11:56:21 -07004663 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664 if (dev->iflink == -1)
4665 dev->iflink = dev->ifindex;
4666
4667 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004668 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669 hlist_for_each(p, head) {
4670 struct net_device *d
4671 = hlist_entry(p, struct net_device, name_hlist);
4672 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4673 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004674 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004676 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004678 /* Fix illegal checksum combinations */
4679 if ((dev->features & NETIF_F_HW_CSUM) &&
4680 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4681 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4682 dev->name);
4683 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4684 }
4685
4686 if ((dev->features & NETIF_F_NO_CSUM) &&
4687 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4688 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4689 dev->name);
4690 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4691 }
4692
Herbert Xub63365a2008-10-23 01:11:29 -07004693 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004695 /* Enable software GSO if SG is supported. */
4696 if (dev->features & NETIF_F_SG)
4697 dev->features |= NETIF_F_GSO;
4698
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004699 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004700 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004701 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004702 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004703 dev->reg_state = NETREG_REGISTERED;
4704
Linus Torvalds1da177e2005-04-16 15:20:36 -07004705 /*
4706	 * Default initial state at registration is that the
4707 * device is present.
4708 */
4709
4710 set_bit(__LINK_STATE_PRESENT, &dev->state);
4711
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004713 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004714 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715
4716 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004717 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004718 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004719 if (ret) {
4720 rollback_registered(dev);
4721 dev->reg_state = NETREG_UNREGISTERED;
4722 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723
4724out:
4725 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004726
4727err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004728 if (dev->netdev_ops->ndo_uninit)
4729 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004730 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004731}
4732
4733/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004734 * init_dummy_netdev - init a dummy network device for NAPI
4735 * @dev: device to init
4736 *
4737 * This takes a network device structure and initializes the minimum
4738 * number of fields so it can be used to schedule NAPI polls without
4739 * registering a full blown interface. This is to be used by drivers
4740 * that need to tie several hardware interfaces to a single NAPI
4741 * poll scheduler due to HW limitations.
4742 */
4743int init_dummy_netdev(struct net_device *dev)
4744{
4745 /* Clear everything. Note we don't initialize spinlocks
4746	 * as they aren't supposed to be taken by any of the
4747 * NAPI code and this dummy netdev is supposed to be
4748 * only ever used for NAPI polls
4749 */
4750 memset(dev, 0, sizeof(struct net_device));
4751
4752 /* make sure we BUG if trying to hit standard
4753 * register/unregister code path
4754 */
4755 dev->reg_state = NETREG_DUMMY;
4756
4757 /* initialize the ref count */
4758 atomic_set(&dev->refcnt, 1);
4759
4760 /* NAPI wants this */
4761 INIT_LIST_HEAD(&dev->napi_list);
4762
4763 /* a dummy interface is started by default */
4764 set_bit(__LINK_STATE_PRESENT, &dev->state);
4765 set_bit(__LINK_STATE_START, &dev->state);
4766
4767 return 0;
4768}
4769EXPORT_SYMBOL_GPL(init_dummy_netdev);
4770
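/*
 * Example (illustrative sketch, "mydrv" names are hypothetical): a driver
 * with one PCI function but several hardware queues can hang all of its
 * NAPI contexts off a single dummy netdev:
 *
 *	struct mydrv_adapter {
 *		struct net_device	napi_dev;
 *		struct napi_struct	napi[4];
 *	};
 *
 *	static void mydrv_setup_napi(struct mydrv_adapter *adapter)
 *	{
 *		int i;
 *
 *		init_dummy_netdev(&adapter->napi_dev);
 *		for (i = 0; i < 4; i++)
 *			netif_napi_add(&adapter->napi_dev,
 *				       &adapter->napi[i], mydrv_poll, 64);
 *	}
 */
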
4771
4772/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004773 * register_netdev - register a network device
4774 * @dev: device to register
4775 *
4776 * Take a completed network device structure and add it to the kernel
4777 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4778 * chain. 0 is returned on success. A negative errno code is returned
4779 * on a failure to set up the device, or if the name is a duplicate.
4780 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004781 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004782 * and expands the device name if you passed a format string to
4783 * alloc_netdev.
4784 */
4785int register_netdev(struct net_device *dev)
4786{
4787 int err;
4788
4789 rtnl_lock();
4790
4791 /*
4792 * If the name is a format string the caller wants us to do a
4793 * name allocation.
4794 */
4795 if (strchr(dev->name, '%')) {
4796 err = dev_alloc_name(dev, dev->name);
4797 if (err < 0)
4798 goto out;
4799 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004800
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801 err = register_netdevice(dev);
4802out:
4803 rtnl_unlock();
4804 return err;
4805}
4806EXPORT_SYMBOL(register_netdev);
4807
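/*
 * Example (illustrative): the usual driver probe sequence that ends in
 * register_netdev().  The "mydrv" helpers and hw_mac_addr are
 * hypothetical.
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_etherdev(sizeof(struct mydrv_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &mydrv_netdev_ops;
 *	memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */
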
4808/*
4809 * netdev_wait_allrefs - wait until all references are gone.
4810 *
4811 * This is called when unregistering network devices.
4812 *
4813 * Any protocol or device that holds a reference should register
4814 * for netdevice notification, and cleanup and put back the
4815 * reference if they receive an UNREGISTER event.
4816 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004817 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004818 */
4819static void netdev_wait_allrefs(struct net_device *dev)
4820{
4821 unsigned long rebroadcast_time, warning_time;
4822
4823 rebroadcast_time = warning_time = jiffies;
4824 while (atomic_read(&dev->refcnt) != 0) {
4825 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004826 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004827
4828 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004829 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004830
4831 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4832 &dev->state)) {
4833 /* We must not have linkwatch events
4834 * pending on unregister. If this
4835 * happens, we simply run the queue
4836 * unscheduled, resulting in a noop
4837 * for this device.
4838 */
4839 linkwatch_run_queue();
4840 }
4841
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004842 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004843
4844 rebroadcast_time = jiffies;
4845 }
4846
4847 msleep(250);
4848
4849 if (time_after(jiffies, warning_time + 10 * HZ)) {
4850 printk(KERN_EMERG "unregister_netdevice: "
4851 "waiting for %s to become free. Usage "
4852 "count = %d\n",
4853 dev->name, atomic_read(&dev->refcnt));
4854 warning_time = jiffies;
4855 }
4856 }
4857}
4858
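/*
 * Example (illustrative sketch): a subsystem that caches net_device
 * pointers should drop its references on NETDEV_UNREGISTER so that
 * netdev_wait_allrefs() does not spin forever.  "my_cache_remove" is
 * hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UNREGISTER && my_cache_remove(dev))
 *			dev_put(dev);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_notifier);
 */
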
4859/* The sequence is:
4860 *
4861 * rtnl_lock();
4862 * ...
4863 * register_netdevice(x1);
4864 * register_netdevice(x2);
4865 * ...
4866 * unregister_netdevice(y1);
4867 * unregister_netdevice(y2);
4868 * ...
4869 * rtnl_unlock();
4870 * free_netdev(y1);
4871 * free_netdev(y2);
4872 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07004873 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07004874 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004875 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07004876 * without deadlocking with linkwatch via keventd.
4877 * 2) Since we run with the RTNL semaphore not held, we can sleep
4878 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07004879 *
4880 * We must not return until all unregister events added during
4881 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004882 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004883void netdev_run_todo(void)
4884{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004885 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004886
Linus Torvalds1da177e2005-04-16 15:20:36 -07004887 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004888 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07004889
4890 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004891
Linus Torvalds1da177e2005-04-16 15:20:36 -07004892 while (!list_empty(&list)) {
4893 struct net_device *dev
4894 = list_entry(list.next, struct net_device, todo_list);
4895 list_del(&dev->todo_list);
4896
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004897 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004898 printk(KERN_ERR "network todo '%s' but state %d\n",
4899 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004900 dump_stack();
4901 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004903
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004904 dev->reg_state = NETREG_UNREGISTERED;
4905
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004906 on_each_cpu(flush_backlog, dev, 1);
4907
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004908 netdev_wait_allrefs(dev);
4909
4910 /* paranoia */
4911 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004912 WARN_ON(dev->ip_ptr);
4913 WARN_ON(dev->ip6_ptr);
4914 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004915
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004916 if (dev->destructor)
4917 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07004918
4919 /* Free network device */
4920 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004922}
4923
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08004924/**
4925 * dev_get_stats - get network device statistics
4926 * @dev: device to get statistics from
4927 *
4928 * Get network statistics from device. The device driver may provide
4929 * its own method by setting dev->netdev_ops->get_stats; otherwise
4930 * the internal statistics structure is used.
4931 */
4932const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00004933{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08004934 const struct net_device_ops *ops = dev->netdev_ops;
4935
4936 if (ops->ndo_get_stats)
4937 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00004938 else {
4939 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
4940 struct net_device_stats *stats = &dev->stats;
4941 unsigned int i;
4942 struct netdev_queue *txq;
4943
4944 for (i = 0; i < dev->num_tx_queues; i++) {
4945 txq = netdev_get_tx_queue(dev, i);
4946 tx_bytes += txq->tx_bytes;
4947 tx_packets += txq->tx_packets;
4948 tx_dropped += txq->tx_dropped;
4949 }
4950 if (tx_bytes || tx_packets || tx_dropped) {
4951 stats->tx_bytes = tx_bytes;
4952 stats->tx_packets = tx_packets;
4953 stats->tx_dropped = tx_dropped;
4954 }
4955 return stats;
4956 }
Rusty Russellc45d2862007-03-28 14:29:08 -07004957}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08004958EXPORT_SYMBOL(dev_get_stats);
Rusty Russellc45d2862007-03-28 14:29:08 -07004959
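/*
 * Example (hypothetical driver code): supplying ndo_get_stats so that
 * counters are refreshed from hardware before being reported.  The
 * mydrv_read_reg() helper and register names are assumptions.
 *
 *	static struct net_device_stats *mydrv_get_stats(struct net_device *dev)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		dev->stats.rx_packets = mydrv_read_reg(priv, MYDRV_RX_PKTS);
 *		dev->stats.tx_packets = mydrv_read_reg(priv, MYDRV_TX_PKTS);
 *		return &dev->stats;
 *	}
 *
 *	static const struct net_device_ops mydrv_netdev_ops = {
 *		.ndo_get_stats	= mydrv_get_stats,
 *	};
 */
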
David S. Millerdc2b4842008-07-08 17:18:23 -07004960static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07004961 struct netdev_queue *queue,
4962 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07004963{
David S. Millerdc2b4842008-07-08 17:18:23 -07004964 queue->dev = dev;
4965}
4966
David S. Millerbb949fb2008-07-08 16:55:56 -07004967static void netdev_init_queues(struct net_device *dev)
4968{
David S. Millere8a04642008-07-17 00:34:19 -07004969 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4970 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07004971 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07004972}
4973
Linus Torvalds1da177e2005-04-16 15:20:36 -07004974/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004975 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004976 * @sizeof_priv: size of private data to allocate space for
4977 * @name: device name format string
4978 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004979 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980 *
4981 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004982 * and performs basic initialization. Also allocates subqueue structs
4983 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004985struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4986 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987{
David S. Millere8a04642008-07-17 00:34:19 -07004988 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004989 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07004990 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00004991 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004993 BUG_ON(strlen(name) >= sizeof(dev->name));
4994
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004995 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07004996 if (sizeof_priv) {
4997 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00004998 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07004999 alloc_size += sizeof_priv;
5000 }
5001 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005002 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005004 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005005 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005006 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005007 return NULL;
5008 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009
Stephen Hemminger79439862008-07-21 13:28:44 -07005010 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005011 if (!tx) {
5012 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5013 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005014 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005015 }
5016
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005017 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005018 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005019
5020 if (dev_addr_init(dev))
5021 goto free_tx;
5022
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005023 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005024
David S. Millere8a04642008-07-17 00:34:19 -07005025 dev->_tx = tx;
5026 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005027 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005028
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005029 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005030
David S. Millerbb949fb2008-07-08 16:55:56 -07005031 netdev_init_queues(dev);
5032
Herbert Xud565b0a2008-12-15 23:38:52 -08005033 INIT_LIST_HEAD(&dev->napi_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005034 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005035 setup(dev);
5036 strcpy(dev->name, name);
5037 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005038
5039free_tx:
5040 kfree(tx);
5041
5042free_p:
5043 kfree(p);
5044 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005045}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005046EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047
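/*
 * Example (illustrative): allocating an Ethernet-style device with four
 * TX queues; the "%d" in the name is expanded later by register_netdev().
 * "mydrv_priv" is a hypothetical private struct.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mq(sizeof(struct mydrv_priv), "myeth%d",
 *			      ether_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */
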
5048/**
5049 * free_netdev - free network device
5050 * @dev: device
5051 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005052 * This function does the last stage of destroying an allocated device
5053 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005054 * If this is the last reference then it will be freed.
5055 */
5056void free_netdev(struct net_device *dev)
5057{
Herbert Xud565b0a2008-12-15 23:38:52 -08005058 struct napi_struct *p, *n;
5059
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005060 release_net(dev_net(dev));
5061
David S. Millere8a04642008-07-17 00:34:19 -07005062 kfree(dev->_tx);
5063
Jiri Pirkof001fde2009-05-05 02:48:28 +00005064 /* Flush device addresses */
5065 dev_addr_flush(dev);
5066
Herbert Xud565b0a2008-12-15 23:38:52 -08005067 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5068 netif_napi_del(p);
5069
Stephen Hemminger3041a062006-05-26 13:25:24 -07005070 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071 if (dev->reg_state == NETREG_UNINITIALIZED) {
5072 kfree((char *)dev - dev->padded);
5073 return;
5074 }
5075
5076 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5077 dev->reg_state = NETREG_RELEASED;
5078
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005079 /* will free via device release */
5080 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005082
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005083/**
5084 * synchronize_net - Synchronize with packet receive processing
5085 *
5086 * Wait for packets currently being received to be done.
5087 * Does not block later packets from starting.
5088 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005089void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090{
5091 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005092 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093}
5094
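/*
 * Example (illustrative sketch): the usual unpublish-then-free pattern.
 * Once the hook pointer is cleared, synchronize_net() guarantees that no
 * CPU is still running the old handler for a previously received packet.
 * "my_hook" and its state are hypothetical.
 *
 *	rcu_assign_pointer(my_hook, NULL);
 *	synchronize_net();
 *	kfree(my_hook_state);
 */
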
5095/**
5096 * unregister_netdevice - remove device from the kernel
5097 * @dev: device
5098 *
5099 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005100 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005101 *
5102 * Callers must hold the rtnl semaphore. You may want
5103 * unregister_netdev() instead of this.
5104 */
5105
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08005106void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005107{
Herbert Xua6620712007-12-12 19:21:56 -08005108 ASSERT_RTNL();
5109
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005110 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005111 /* Finish processing unregister after unlock */
5112 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005113}
5114
5115/**
5116 * unregister_netdev - remove device from the kernel
5117 * @dev: device
5118 *
5119 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005120 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121 *
5122 * This is just a wrapper for unregister_netdevice that takes
5123 * the rtnl semaphore. In general you want to use this and not
5124 * unregister_netdevice.
5125 */
5126void unregister_netdev(struct net_device *dev)
5127{
5128 rtnl_lock();
5129 unregister_netdevice(dev);
5130 rtnl_unlock();
5131}
5132
5133EXPORT_SYMBOL(unregister_netdev);
5134
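/*
 * Example (illustrative): the teardown that matches a typical probe path.
 * Unregister first, then free once unregistration has completed; the
 * "mydrv" names are hypothetical.
 *
 *	static void mydrv_remove(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		unregister_netdev(dev);
 *		free_netdev(dev);
 *	}
 */
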
Eric W. Biedermance286d32007-09-12 13:53:49 +02005135/**
5136 * dev_change_net_namespace - move device to a different network namespace
5137 * @dev: device
5138 * @net: network namespace
5139 * @pat: If not NULL name pattern to try if the current device name
5140 * is already taken in the destination network namespace.
5141 *
5142 * This function shuts down a device interface and moves it
5143 * to a new network namespace. On success 0 is returned, on
5144 * a failure a negative errno code is returned.
5145 *
5146 * Callers must hold the rtnl semaphore.
5147 */
5148
5149int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5150{
5151 char buf[IFNAMSIZ];
5152 const char *destname;
5153 int err;
5154
5155 ASSERT_RTNL();
5156
5157 /* Don't allow namespace local devices to be moved. */
5158 err = -EINVAL;
5159 if (dev->features & NETIF_F_NETNS_LOCAL)
5160 goto out;
5161
Eric W. Biederman38918452008-10-27 17:51:47 -07005162#ifdef CONFIG_SYSFS
5163 /* Don't allow real devices to be moved when sysfs
5164 * is enabled.
5165 */
5166 err = -EINVAL;
5167 if (dev->dev.parent)
5168 goto out;
5169#endif
5170
Eric W. Biedermance286d32007-09-12 13:53:49 +02005171	/* Ensure the device has been registered */
5172 err = -EINVAL;
5173 if (dev->reg_state != NETREG_REGISTERED)
5174 goto out;
5175
5176	/* Get out if there is nothing to do */
5177 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005178 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005179 goto out;
5180
5181 /* Pick the destination device name, and ensure
5182 * we can use it in the destination network namespace.
5183 */
5184 err = -EEXIST;
5185 destname = dev->name;
5186 if (__dev_get_by_name(net, destname)) {
5187 /* We get here if we can't use the current device name */
5188 if (!pat)
5189 goto out;
5190 if (!dev_valid_name(pat))
5191 goto out;
5192 if (strchr(pat, '%')) {
5193 if (__dev_alloc_name(net, pat, buf) < 0)
5194 goto out;
5195 destname = buf;
5196 } else
5197 destname = pat;
5198 if (__dev_get_by_name(net, destname))
5199 goto out;
5200 }
5201
5202 /*
5203	 * And now a mini version of register_netdevice and unregister_netdevice.
5204 */
5205
5206 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005207 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005208
5209 /* And unlink it from device chain */
5210 err = -ENODEV;
5211 unlist_netdevice(dev);
5212
5213 synchronize_net();
5214
5215 /* Shutdown queueing discipline. */
5216 dev_shutdown(dev);
5217
5218	/* Notify protocols that we are about to destroy
5219 this device. They should clean all the things.
5220 */
5221 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5222
5223 /*
5224 * Flush the unicast and multicast chains
5225 */
5226 dev_addr_discard(dev);
5227
Eric W. Biederman38918452008-10-27 17:51:47 -07005228 netdev_unregister_kobject(dev);
5229
Eric W. Biedermance286d32007-09-12 13:53:49 +02005230 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005231 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005232
5233 /* Assign the new device name */
5234 if (destname != dev->name)
5235 strcpy(dev->name, destname);
5236
5237 /* If there is an ifindex conflict assign a new one */
5238 if (__dev_get_by_index(net, dev->ifindex)) {
5239 int iflink = (dev->iflink == dev->ifindex);
5240 dev->ifindex = dev_new_index(net);
5241 if (iflink)
5242 dev->iflink = dev->ifindex;
5243 }
5244
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005245 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005246 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005247 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005248
5249 /* Add the device back in the hashes */
5250 list_netdevice(dev);
5251
5252 /* Notify protocols, that a new device appeared. */
5253 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5254
5255 synchronize_net();
5256 err = 0;
5257out:
5258 return err;
5259}
5260
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261static int dev_cpu_callback(struct notifier_block *nfb,
5262 unsigned long action,
5263 void *ocpu)
5264{
5265 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005266 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005267 struct sk_buff *skb;
5268 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5269 struct softnet_data *sd, *oldsd;
5270
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005271 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005272 return NOTIFY_OK;
5273
5274 local_irq_disable();
5275 cpu = smp_processor_id();
5276 sd = &per_cpu(softnet_data, cpu);
5277 oldsd = &per_cpu(softnet_data, oldcpu);
5278
5279 /* Find end of our completion_queue. */
5280 list_skb = &sd->completion_queue;
5281 while (*list_skb)
5282 list_skb = &(*list_skb)->next;
5283 /* Append completion queue from offline CPU. */
5284 *list_skb = oldsd->completion_queue;
5285 oldsd->completion_queue = NULL;
5286
5287 /* Find end of our output_queue. */
5288 list_net = &sd->output_queue;
5289 while (*list_net)
5290 list_net = &(*list_net)->next_sched;
5291 /* Append output queue from offline CPU. */
5292 *list_net = oldsd->output_queue;
5293 oldsd->output_queue = NULL;
5294
5295 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5296 local_irq_enable();
5297
5298 /* Process offline CPU's input_pkt_queue */
5299 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5300 netif_rx(skb);
5301
5302 return NOTIFY_OK;
5303}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304
5305
Herbert Xu7f353bf2007-08-10 15:47:58 -07005306/**
Herbert Xub63365a2008-10-23 01:11:29 -07005307 * netdev_increment_features - increment feature set by one
5308 * @all: current feature set
5309 * @one: new feature set
5310 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005311 *
5312 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005313 * @one to the master device with current feature set @all. Will not
5314 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005315 */
Herbert Xub63365a2008-10-23 01:11:29 -07005316unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5317 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005318{
Herbert Xub63365a2008-10-23 01:11:29 -07005319 /* If device needs checksumming, downgrade to it. */
5320 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5321 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5322 else if (mask & NETIF_F_ALL_CSUM) {
5323 /* If one device supports v4/v6 checksumming, set for all. */
5324 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5325 !(all & NETIF_F_GEN_CSUM)) {
5326 all &= ~NETIF_F_ALL_CSUM;
5327 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5328 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005329
Herbert Xub63365a2008-10-23 01:11:29 -07005330 /* If one device supports hw checksumming, set for all. */
5331 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5332 all &= ~NETIF_F_ALL_CSUM;
5333 all |= NETIF_F_HW_CSUM;
5334 }
5335 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005336
Herbert Xub63365a2008-10-23 01:11:29 -07005337 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005338
Herbert Xub63365a2008-10-23 01:11:29 -07005339 one |= all & NETIF_F_ONE_FOR_ALL;
5340 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
5341 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005342
5343 return all;
5344}
Herbert Xub63365a2008-10-23 01:11:29 -07005345EXPORT_SYMBOL(netdev_increment_features);
Herbert Xu7f353bf2007-08-10 15:47:58 -07005346
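/*
 * Example (illustrative sketch, loosely modelled on how a master device
 * such as bonding might combine its slaves' feature sets; the names are
 * hypothetical):
 *
 *	unsigned long features = master->features & ~NETIF_F_ONE_FOR_ALL;
 *	struct slave *slave;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     NETIF_F_ONE_FOR_ALL);
 *	master->features = netdev_fix_features(features, master->name);
 */
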
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005347static struct hlist_head *netdev_create_hash(void)
5348{
5349 int i;
5350 struct hlist_head *hash;
5351
5352 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5353 if (hash != NULL)
5354 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5355 INIT_HLIST_HEAD(&hash[i]);
5356
5357 return hash;
5358}
5359
Eric W. Biederman881d9662007-09-17 11:56:21 -07005360/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005361static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005362{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005363 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005364
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005365 net->dev_name_head = netdev_create_hash();
5366 if (net->dev_name_head == NULL)
5367 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005368
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005369 net->dev_index_head = netdev_create_hash();
5370 if (net->dev_index_head == NULL)
5371 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005372
5373 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005374
5375err_idx:
5376 kfree(net->dev_name_head);
5377err_name:
5378 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005379}
5380
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005381/**
5382 * netdev_drivername - network driver for the device
5383 * @dev: network device
5384 * @buffer: buffer for resulting name
5385 * @len: size of buffer
5386 *
5387 * Determine network driver for device.
5388 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005389char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005390{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005391 const struct device_driver *driver;
5392 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005393
5394 if (len <= 0 || !buffer)
5395 return buffer;
5396 buffer[0] = 0;
5397
5398 parent = dev->dev.parent;
5399
5400 if (!parent)
5401 return buffer;
5402
5403 driver = parent->driver;
5404 if (driver && driver->name)
5405 strlcpy(buffer, driver->name, len);
5406 return buffer;
5407}
5408
Pavel Emelyanov46650792007-10-08 20:38:39 -07005409static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005410{
5411 kfree(net->dev_name_head);
5412 kfree(net->dev_index_head);
5413}
5414
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005415static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005416 .init = netdev_init,
5417 .exit = netdev_exit,
5418};
5419
Pavel Emelyanov46650792007-10-08 20:38:39 -07005420static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005421{
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005422 struct net_device *dev;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005423 /*
5424	 * Push all migratable network devices back to the
5425 * initial network namespace
5426 */
5427 rtnl_lock();
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005428restart:
5429 for_each_netdev(net, dev) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005430 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005431 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005432
5433		/* Ignore unmovable devices (e.g. loopback) */
5434 if (dev->features & NETIF_F_NETNS_LOCAL)
5435 continue;
5436
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005437 /* Delete virtual devices */
5438 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
5439 dev->rtnl_link_ops->dellink(dev);
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005440 goto restart;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005441 }
5442
Eric W. Biedermance286d32007-09-12 13:53:49 +02005443		/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005444 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5445 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005446 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005447 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005448 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005449 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005450 }
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005451 goto restart;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005452 }
5453 rtnl_unlock();
5454}
5455
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005456static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005457 .exit = default_device_exit,
5458};
5459
Linus Torvalds1da177e2005-04-16 15:20:36 -07005460/*
5461 * Initialize the DEV module. At boot time this walks the device list and
5462 * unhooks any devices that fail to initialise (normally hardware not
5463 * present) and leaves us with a valid list of present and active devices.
5464 *
5465 */
5466
5467/*
5468 * This is called single threaded during boot, so no need
5469 * to take the rtnl semaphore.
5470 */
5471static int __init net_dev_init(void)
5472{
5473 int i, rc = -ENOMEM;
5474
5475 BUG_ON(!dev_boot_phase);
5476
Linus Torvalds1da177e2005-04-16 15:20:36 -07005477 if (dev_proc_init())
5478 goto out;
5479
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005480 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005481 goto out;
5482
5483 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08005484 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005485 INIT_LIST_HEAD(&ptype_base[i]);
5486
Eric W. Biederman881d9662007-09-17 11:56:21 -07005487 if (register_pernet_subsys(&netdev_net_ops))
5488 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005489
5490 /*
5491 * Initialise the packet receive queues.
5492 */
5493
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005494 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005495 struct softnet_data *queue;
5496
5497 queue = &per_cpu(softnet_data, i);
5498 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005499 queue->completion_queue = NULL;
5500 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005501
5502 queue->backlog.poll = process_backlog;
5503 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005504 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005505 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005506 }
5507
Linus Torvalds1da177e2005-04-16 15:20:36 -07005508 dev_boot_phase = 0;
5509
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005510	/* The loopback device is special: if any other network device
5511	 * is present in a network namespace, the loopback device must
5512	 * be present too. Since we now dynamically allocate and free the
5513	 * loopback device, ensure this invariant is maintained by
5514	 * keeping the loopback device as the first device on the
5515	 * list of network devices. This ensures the loopback device
5516	 * is the first device that appears and the last network device
5517	 * that disappears.
5518 */
5519 if (register_pernet_device(&loopback_net_ops))
5520 goto out;
5521
5522 if (register_pernet_device(&default_device_ops))
5523 goto out;
5524
Carlos R. Mafra962cf362008-05-15 11:15:37 -03005525 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5526 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005527
5528 hotcpu_notifier(dev_cpu_callback, 0);
5529 dst_init();
5530 dev_mcast_init();
5531 rc = 0;
5532out:
5533 return rc;
5534}
5535
5536subsys_initcall(net_dev_init);
5537
Krishna Kumare88721f2009-02-18 17:55:02 -08005538static int __init initialize_hashrnd(void)
5539{
5540 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5541 return 0;
5542}
5543
5544late_initcall_sync(initialize_hashrnd);
5545
Linus Torvalds1da177e2005-04-16 15:20:36 -07005546EXPORT_SYMBOL(__dev_get_by_index);
5547EXPORT_SYMBOL(__dev_get_by_name);
5548EXPORT_SYMBOL(__dev_remove_pack);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08005549EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005550EXPORT_SYMBOL(dev_add_pack);
5551EXPORT_SYMBOL(dev_alloc_name);
5552EXPORT_SYMBOL(dev_close);
5553EXPORT_SYMBOL(dev_get_by_flags);
5554EXPORT_SYMBOL(dev_get_by_index);
5555EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005556EXPORT_SYMBOL(dev_open);
5557EXPORT_SYMBOL(dev_queue_xmit);
5558EXPORT_SYMBOL(dev_remove_pack);
5559EXPORT_SYMBOL(dev_set_allmulti);
5560EXPORT_SYMBOL(dev_set_promiscuity);
5561EXPORT_SYMBOL(dev_change_flags);
5562EXPORT_SYMBOL(dev_set_mtu);
5563EXPORT_SYMBOL(dev_set_mac_address);
5564EXPORT_SYMBOL(free_netdev);
5565EXPORT_SYMBOL(netdev_boot_setup_check);
5566EXPORT_SYMBOL(netdev_set_master);
5567EXPORT_SYMBOL(netdev_state_change);
5568EXPORT_SYMBOL(netif_receive_skb);
5569EXPORT_SYMBOL(netif_rx);
5570EXPORT_SYMBOL(register_gifconf);
5571EXPORT_SYMBOL(register_netdevice);
5572EXPORT_SYMBOL(register_netdevice_notifier);
5573EXPORT_SYMBOL(skb_checksum_help);
5574EXPORT_SYMBOL(synchronize_net);
5575EXPORT_SYMBOL(unregister_netdevice);
5576EXPORT_SYMBOL(unregister_netdevice_notifier);
5577EXPORT_SYMBOL(net_enable_timestamp);
5578EXPORT_SYMBOL(net_disable_timestamp);
5579EXPORT_SYMBOL(dev_get_flags);
5580
5581#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
5582EXPORT_SYMBOL(br_handle_frame_hook);
5583EXPORT_SYMBOL(br_fdb_get_hook);
5584EXPORT_SYMBOL(br_fdb_put_hook);
5585#endif
5586
Linus Torvalds1da177e2005-04-16 15:20:36 -07005587EXPORT_SYMBOL(dev_load);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005588
5589EXPORT_PER_CPU_SYMBOL(softnet_data);