/*
 *      NET3    Protocol independent device support routines.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      Derived from the non IP parts of dev.c 1.0.19
 *              Authors:        Ross Biro
 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *      Additional Authors:
 *              Florian la Roche <rzsfl@rz.uni-sb.de>
 *              Alan Cox <gw4pts@gw4pts.ampr.org>
 *              David Hinds <dahinds@users.sourceforge.net>
 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *              Adam Sulmicki <adam@cfar.umd.edu>
 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *      Changes:
 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 *                                      to 2 if register_netdev gets called
 *                                      before net_dev_init & also removed a
 *                                      few lines of code in the process.
 *              Alan Cox        :       device private ioctl copies fields back.
 *              Alan Cox        :       Transmit queue code does relevant
 *                                      stunts to keep the queue safe.
 *              Alan Cox        :       Fixed double lock.
 *              Alan Cox        :       Fixed promisc NULL pointer trap
 *              ????????        :       Support the full private ioctl range
 *              Alan Cox        :       Moved ioctl permission check into
 *                                      drivers
 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 *              Alan Cox        :       100 backlog just doesn't cut it when
 *                                      you start doing multicast video 8)
 *              Alan Cox        :       Rewrote net_bh and list manager.
 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 *              Alan Cox        :       Took out transmit every packet pass
 *                                      Saved a few bytes in the ioctl handler
 *              Alan Cox        :       Network driver sets packet type before
 *                                      calling netif_rx. Saves a function
 *                                      call a packet.
 *              Alan Cox        :       Hashed net_bh()
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 *              Alan Cox        :       Device lock protection.
 *              Alan Cox        :       Fixed nasty side effect of device close
 *                                      changes.
 *              Rudi Cilibrasi  :       Pass the right thing to
 *                                      set_mac_address()
 *              Dave Miller     :       32bit quantity for the device lock to
 *                                      make it work out on a Sparc.
 *              Bjorn Ekwall    :       Added KERNELD hack.
 *              Alan Cox        :       Cleaned up the backlog initialise.
 *              Craig Metz      :       SIOCGIFCONF fix if space for under
 *                                      1 device.
 *              Thomas Bogendoerfer :   Return ENODEV for dev_open, if there
 *                                      is no device open function.
 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 *              Michael Chastain :      Fix signed/unsigned for SIOCGIFCONF
 *              Cyrus Durgin    :       Cleaned for KMOD
 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 *                                      A network device unload needs to purge
 *                                      the backlog queue.
 *              Paul Rusty Russell :    SIOCSIFNAME
 *              Pekka Riikonen  :       Netdev boot-time settings code
 *              Andrew Morton   :       Make unregister_netdevice wait
 *                                      indefinitely on dev->refcnt
 *              J Hadi Salim    :       - Backlog queue sampling
 *                                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *      The list of packet types we will receive (as opposed to discard)
 *      and the routines to invoke.
 *
 *      Why 16? Because with 16 the only overlap we get on a hash of the
 *      low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *              0800    IP
 *              8100    802.1Q VLAN
 *              0001    802.3
 *              0002    AX.25
 *              0004    802.2
 *              8035    RARP
 *              0005    SNAP
 *              0805    X.25
 *              0806    ARP
 *              8137    IPX
 *              0009    Localtalk
 *              86DD    IPv6
 */

#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;        /* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * For example usages, see register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

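/*
 * Illustrative sketch (not part of the original file): how a pure
 * reader walks the device list under dev_base_lock, per the locking
 * rules above. The function name "example_count_devices" and the use
 * of init_net are assumptions made for the example only.
 */
#if 0
static int example_count_devices(void)
{
        struct net_device *dev;
        int count = 0;

        read_lock(&dev_base_lock);      /* pure reader: read side only */
        for_each_netdev(&init_net, dev)
                count++;
        read_unlock(&dev_base_lock);
        return count;
}
#endif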
#define NETDEV_HASHBITS 8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
        unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
        return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
        return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
        struct net *net = dev_net(dev);

        ASSERT_RTNL();

        write_lock_bh(&dev_base_lock);
        list_add_tail(&dev->dev_list, &net->dev_base_head);
        hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
        hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);
        return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del(&dev->dev_list);
        hlist_del(&dev->name_hlist);
        hlist_del(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);
}

/*
 *      Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *      Device drivers call our routines to queue packets here. We empty the
 *      queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
         ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
         ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
         ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};

static const char *netdev_lock_name[] =
        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
         "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
         "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
         "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
                if (netdev_lock_type[i] == dev_type)
                        return i;
        /* the last key is used by default */
        return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
        int i;

        i = netdev_lock_pos(dev_type);
        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
                                   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
        int i;

        i = netdev_lock_pos(dev->type);
        lockdep_set_class_and_name(&dev->addr_list_lock,
                                   &netdev_addr_lock_key[i],
                                   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
                                                 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

                Protocol management and registration routines

*******************************************************************************/

/*
 *      Add a protocol ID to the list. Now that the input handler is
 *      smarter we can dispense with all the messy stuff that used to be
 *      here.
 *
 *      BEWARE!!! Protocol handlers, mangling input packets,
 *      MUST BE last in hash buckets and checking protocol handlers
 *      MUST start from promiscuous ptype_all chain in net_bh.
 *      It is true now, do not change it.
 *      Explanation follows: if a protocol handler that mangles packets
 *      is first on the list, it has no way to sense that the packet is
 *      cloned and should be copied-on-write, so it will change the packet
 *      and subsequent readers will see a broken packet.
 *      --ANK (980803)
 */

/**
 *      dev_add_pack - add packet handler
 *      @pt: packet type declaration
 *
 *      Add a protocol handler to the networking stack. The passed &packet_type
 *      is linked into kernel lists and may not be freed until it has been
 *      removed from the kernel lists.
 *
 *      This call does not sleep, and therefore cannot guarantee that
 *      all CPUs that are in the middle of receiving packets will see
 *      the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
        int hash;

        spin_lock_bh(&ptype_lock);
        if (pt->type == htons(ETH_P_ALL))
                list_add_rcu(&pt->list, &ptype_all);
        else {
                hash = ntohs(pt->type) & PTYPE_HASH_MASK;
                list_add_rcu(&pt->list, &ptype_base[hash]);
        }
        spin_unlock_bh(&ptype_lock);
}

/**
 *      __dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
        struct list_head *head;
        struct packet_type *pt1;

        spin_lock_bh(&ptype_lock);

        if (pt->type == htons(ETH_P_ALL))
                head = &ptype_all;
        else
                head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
                        list_del_rcu(&pt->list);
                        goto out;
                }
        }

        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
        spin_unlock_bh(&ptype_lock);
}

/**
 *      dev_remove_pack - remove packet handler
 *      @pt: packet type declaration
 *
 *      Remove a protocol handler that was previously added to the kernel
 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *      from the kernel lists and can be freed or reused once this function
 *      returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
        __dev_remove_pack(pt);

        synchronize_net();
}

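/*
 * Illustrative sketch (not in the original file): registering a
 * minimal ETH_P_ALL tap with dev_add_pack() and removing it with
 * dev_remove_pack(). The names "example_tap_rcv" and "example_tap"
 * are hypothetical.
 */
#if 0
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *pt, struct net_device *orig_dev)
{
        /* A tap receives its own clone of the skb; just drop it here. */
        kfree_skb(skb);
        return 0;
}

static struct packet_type example_tap __read_mostly = {
        .type = htons(ETH_P_ALL),       /* all protocols: lands on ptype_all */
        .func = example_tap_rcv,
};

/* dev_add_pack(&example_tap); ... dev_remove_pack(&example_tap); */
#endif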
/******************************************************************************

                      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *      netdev_boot_setup_add - add new setup entry
 *      @name: name of the device
 *      @map: configured settings for the device
 *
 *      Adds new setup entry to the dev_boot_setup list.  The function
 *      returns 0 on error and 1 on success.  This is a generic routine
 *      for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
        struct netdev_boot_setup *s;
        int i;

        s = dev_boot_setup;
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
                        memset(s[i].name, 0, sizeof(s[i].name));
                        strlcpy(s[i].name, name, IFNAMSIZ);
                        memcpy(&s[i].map, map, sizeof(s[i].map));
                        break;
                }
        }

        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *      netdev_boot_setup_check - check boot time settings
 *      @dev: the netdevice
 *
 *      Check boot time settings for the device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
        struct netdev_boot_setup *s = dev_boot_setup;
        int i;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
                        dev->irq        = s[i].map.irq;
                        dev->base_addr  = s[i].map.base_addr;
                        dev->mem_start  = s[i].map.mem_start;
                        dev->mem_end    = s[i].map.mem_end;
                        return 1;
                }
        }
        return 0;
}


/**
 *      netdev_boot_base - get address from boot time settings
 *      @prefix: prefix for network device
 *      @unit: id for network device
 *
 *      Check boot time settings for the base address of device.
 *      The found settings are set for the device to be used
 *      later in the device probing.
 *      Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
        const struct netdev_boot_setup *s = dev_boot_setup;
        char name[IFNAMSIZ];
        int i;

        sprintf(name, "%s%d", prefix, unit);

        /*
         * If device already registered then return base of 1
         * to indicate not to probe for this interface
         */
        if (__dev_get_by_name(&init_net, name))
                return 1;

        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
                if (!strcmp(name, s[i].name))
                        return s[i].map.base_addr;
        return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
        int ints[5];
        struct ifmap map;

        str = get_options(str, ARRAY_SIZE(ints), ints);
        if (!str || !*str)
                return 0;

        /* Save settings */
        memset(&map, 0, sizeof(map));
        if (ints[0] > 0)
                map.irq = ints[1];
        if (ints[0] > 1)
                map.base_addr = ints[2];
        if (ints[0] > 2)
                map.mem_start = ints[3];
        if (ints[0] > 3)
                map.mem_end = ints[4];

        /* Add new entry to the list */
        return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

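/*
 * Illustrative note (not in the original file): given the parsing
 * above, a command line such as
 *
 *      netdev=9,0x300,0,0,eth0
 *
 * would, on my reading of get_options(), record irq 9 and I/O base
 * 0x300 under the name "eth0" (the string left after the integers),
 * to be picked up later via netdev_boot_setup_check(). This is an
 * example sketch, not authoritative documentation of the parameter.
 */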
/*******************************************************************************

                            Device Interface Subroutines

*******************************************************************************/

/**
 *      __dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. Must be called under RTNL semaphore
 *      or @dev_base_lock. If the name is found a pointer to the device
 *      is returned. If the name is not found then %NULL is returned. The
 *      reference counters are not incremented so the caller must be
 *      careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
        struct hlist_node *p;

        hlist_for_each(p, dev_name_hash(net, name)) {
                struct net_device *dev
                        = hlist_entry(p, struct net_device, name_hlist);
                if (!strncmp(dev->name, name, IFNAMSIZ))
                        return dev;
        }
        return NULL;
}

/**
 *      dev_get_by_name - find a device by its name
 *      @net: the applicable net namespace
 *      @name: name to find
 *
 *      Find an interface by name. This can be called from any
 *      context and does its own locking. The returned handle has
 *      the usage count incremented and the caller must use dev_put() to
 *      release it when it is no longer needed. %NULL is returned if no
 *      matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_name(net, name);
        if (dev)
                dev_hold(dev);
        read_unlock(&dev_base_lock);
        return dev;
}

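/*
 * Illustrative sketch (not in the original file): the hold/put
 * discipline that dev_get_by_name() requires of its callers. The
 * function name "example_dump_mtu" is hypothetical.
 */
#if 0
static void example_dump_mtu(struct net *net)
{
        struct net_device *dev = dev_get_by_name(net, "eth0");

        if (!dev)
                return;
        printk(KERN_DEBUG "%s: mtu %d\n", dev->name, dev->mtu);
        dev_put(dev);   /* release the reference taken by dev_get_by_name() */
}
#endif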
/**
 *      __dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns %NULL if the device
 *      is not found or a pointer to the device. The device has not
 *      had its reference counter increased so the caller must be careful
 *      about locking. The caller must hold either the RTNL semaphore
 *      or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
        struct hlist_node *p;

        hlist_for_each(p, dev_index_hash(net, ifindex)) {
                struct net_device *dev
                        = hlist_entry(p, struct net_device, index_hlist);
                if (dev->ifindex == ifindex)
                        return dev;
        }
        return NULL;
}


/**
 *      dev_get_by_index - find a device by its ifindex
 *      @net: the applicable net namespace
 *      @ifindex: index of device
 *
 *      Search for an interface by index. Returns NULL if the device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_index(net, ifindex);
        if (dev)
                dev_hold(dev);
        read_unlock(&dev_base_lock);
        return dev;
}

/**
 *      dev_getbyhwaddr - find a device by its hardware address
 *      @net: the applicable net namespace
 *      @type: media type of device
 *      @ha: hardware address
 *
 *      Search for an interface by MAC address. Returns NULL if the device
 *      is not found or a pointer to the device. The caller must hold the
 *      rtnl semaphore. The returned device has not had its ref count
 *      increased and the caller must therefore be careful about locking.
 *
 *      BUGS:
 *      If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
        struct net_device *dev;

        ASSERT_RTNL();

        for_each_netdev(net, dev)
                if (dev->type == type &&
                    !memcmp(dev->dev_addr, ha, dev->addr_len))
                        return dev;

        return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        ASSERT_RTNL();
        for_each_netdev(net, dev)
                if (dev->type == type)
                        return dev;

        return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
        struct net_device *dev;

        rtnl_lock();
        dev = __dev_getfirstbyhwtype(net, type);
        if (dev)
                dev_hold(dev);
        rtnl_unlock();
        return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *      dev_get_by_flags - find any device with given flags
 *      @net: the applicable net namespace
 *      @if_flags: IFF_* values
 *      @mask: bitmask of bits in if_flags to check
 *
 *      Search for any interface with the given flags. Returns NULL if a device
 *      is not found or a pointer to the device. The device returned has
 *      had a reference added and the pointer is safe until the user calls
 *      dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
        struct net_device *dev, *ret;

        ret = NULL;
        read_lock(&dev_base_lock);
        for_each_netdev(net, dev) {
                if (((dev->flags ^ if_flags) & mask) == 0) {
                        dev_hold(dev);
                        ret = dev;
                        break;
                }
        }
        read_unlock(&dev_base_lock);
        return ret;
}

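/*
 * Illustrative sketch (not in the original file): using
 * dev_get_by_flags() to find the first interface that is up, by
 * passing IFF_UP as both the wanted flags and the mask.
 */
#if 0
static struct net_device *example_first_up(struct net *net)
{
        /* The caller owns the reference and must dev_put() it when done. */
        return dev_get_by_flags(net, IFF_UP, IFF_UP);
}
#endif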
/**
 *      dev_valid_name - check if name is okay for network device
 *      @name: name string
 *
 *      Network device names need to be valid file names to
 *      allow sysfs to work.  We also disallow any kind of
 *      whitespace.
 */
int dev_valid_name(const char *name)
{
        if (*name == '\0')
                return 0;
        if (strlen(name) >= IFNAMSIZ)
                return 0;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return 0;

        while (*name) {
                if (*name == '/' || isspace(*name))
                        return 0;
                name++;
        }
        return 1;
}

/**
 *      __dev_alloc_name - allocate a name for a device
 *      @net: network namespace to allocate the device name in
 *      @name: name format string
 *      @buf:  scratch buffer and result name string
 *
 *      Passed a format string, eg. "lt%d", it will try to find a suitable
 *      id. It scans the list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
        int i = 0;
        const char *p;
        const int max_netdevices = 8*PAGE_SIZE;
        unsigned long *inuse;
        struct net_device *d;

        p = strnchr(name, IFNAMSIZ-1, '%');
        if (p) {
                /*
                 * Verify the string as this thing may have come from
                 * the user.  There must be either one "%d" and no other "%"
                 * characters.
                 */
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;

                /* Use one page as a bit array of possible slots */
                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
                if (!inuse)
                        return -ENOMEM;

                for_each_netdev(net, d) {
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
                                continue;

                        /* avoid cases where sscanf is not exact inverse of printf */
                        snprintf(buf, IFNAMSIZ, name, i);
                        if (!strncmp(buf, d->name, IFNAMSIZ))
                                set_bit(i, inuse);
                }

                i = find_first_zero_bit(inuse, max_netdevices);
                free_page((unsigned long) inuse);
        }

        snprintf(buf, IFNAMSIZ, name, i);
        if (!__dev_get_by_name(net, buf))
                return i;

        /* It is possible to run out of possible slots
         * when the name is long and there isn't enough space left
         * for the digits, or if all bits are used.
         */
        return -ENFILE;
}

/**
 *      dev_alloc_name - allocate a name for a device
 *      @dev: device
 *      @name: name format string
 *
 *      Passed a format string, eg. "lt%d", it will try to find a suitable
 *      id. It scans the list of devices to build up a free map, then chooses
 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 *      while allocating the name and adding the device in order to avoid
 *      duplicates.
 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *      Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
        char buf[IFNAMSIZ];
        struct net *net;
        int ret;

        BUG_ON(!dev_net(dev));
        net = dev_net(dev);
        ret = __dev_alloc_name(net, name, buf);
        if (ret >= 0)
                strlcpy(dev->name, buf, IFNAMSIZ);
        return ret;
}

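/*
 * Illustrative sketch (not in the original file): a driver-style
 * caller using dev_alloc_name() to claim the next free "eth%d" slot
 * before registration. The function name is hypothetical.
 */
#if 0
static int example_name_device(struct net_device *dev)
{
        int unit = dev_alloc_name(dev, "eth%d");        /* e.g. yields "eth2" */

        if (unit < 0)
                return unit;    /* -EINVAL or -ENFILE, per the code above */
        return 0;
}
#endif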

/**
 *      dev_change_name - change name of a device
 *      @dev: device
 *      @newname: name (or format string) must be at least IFNAMSIZ
 *
 *      Change the name of a device. A format string such as "eth%d"
 *      can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
        struct net *net;

        ASSERT_RTNL();
        BUG_ON(!dev_net(dev));

        net = dev_net(dev);
        if (dev->flags & IFF_UP)
                return -EBUSY;

        if (!dev_valid_name(newname))
                return -EINVAL;

        if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
                return 0;

        memcpy(oldname, dev->name, IFNAMSIZ);

        if (strchr(newname, '%')) {
                err = dev_alloc_name(dev, newname);
                if (err < 0)
                        return err;
        }
        else if (__dev_get_by_name(net, newname))
                return -EEXIST;
        else
                strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
        /* For now only devices in the initial network namespace
         * are in sysfs.
         */
        if (net == &init_net) {
                ret = device_rename(&dev->dev, dev->name);
                if (ret) {
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        return ret;
                }
        }

        write_lock_bh(&dev_base_lock);
        hlist_del(&dev->name_hlist);
        hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
        write_unlock_bh(&dev_base_lock);

        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
        ret = notifier_to_errno(ret);

        if (ret) {
                if (err) {
                        printk(KERN_ERR
                               "%s: name change rollback failed: %d.\n",
                               dev->name, ret);
                } else {
                        err = ret;
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
                }
        }

        return err;
}

/**
 *      dev_set_alias - change ifalias of a device
 *      @dev: device
 *      @alias: name up to IFALIASZ
 *      @len: limit of bytes to copy from info
 *
 *      Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
        ASSERT_RTNL();

        if (len >= IFALIASZ)
                return -EINVAL;

        if (!len) {
                if (dev->ifalias) {
                        kfree(dev->ifalias);
                        dev->ifalias = NULL;
                }
                return 0;
        }

        dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
        if (!dev->ifalias)
                return -ENOMEM;

        strlcpy(dev->ifalias, alias, len+1);
        return len;
}

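/*
 * Illustrative sketch (not in the original file): renaming a device
 * under RTNL, as dev_change_name() asserts. The device must be down,
 * otherwise -EBUSY is returned per the check above. The name pattern
 * "lan%d" is an example only.
 */
#if 0
static int example_rename(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_change_name(dev, "lan%d");    /* '%' routes via dev_alloc_name() */
        rtnl_unlock();
        return err;
}
#endif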

/**
 *      netdev_features_change - device changes features
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *      netdev_state_change - device changes state
 *      @dev: device to cause notification
 *
 *      Called to indicate a device has changed state. This function calls
 *      the notifier chains for netdev_chain and sends a NEWLINK message
 *      to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
        if (dev->flags & IFF_UP) {
                call_netdevice_notifiers(NETDEV_CHANGE, dev);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
        }
}

void netdev_bonding_change(struct net_device *dev)
{
        call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *      dev_load - load a network module
 *      @net: the applicable net namespace
 *      @name: name of interface
 *
 *      If a network interface is not present and the process has suitable
 *      privileges this function loads the module. If module loading is not
 *      available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
        struct net_device *dev;

        read_lock(&dev_base_lock);
        dev = __dev_get_by_name(net, name);
        read_unlock(&dev_base_lock);

        if (!dev && capable(CAP_SYS_MODULE))
                request_module("%s", name);
}

/**
 *      dev_open - prepare an interface for use.
 *      @dev: device to open
 *
 *      Takes a device from down to up state. The device's private open
 *      function is invoked and then the multicast lists are loaded. Finally
 *      the device is moved into the up state and a %NETDEV_UP message is
 *      sent to the netdev notifier chain.
 *
 *      Calling this function on an active interface is a nop. On a failure
 *      a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        int ret = 0;

        ASSERT_RTNL();

        /*
         *      Is it already up?
         */

        if (dev->flags & IFF_UP)
                return 0;

        /*
         *      Is it even present?
         */
        if (!netif_device_present(dev))
                return -ENODEV;

        /*
         *      Call device private open method
         */
        set_bit(__LINK_STATE_START, &dev->state);

        if (ops->ndo_validate_addr)
                ret = ops->ndo_validate_addr(dev);

        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);

        /*
         *      If it went open OK then:
         */

        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
        else {
                /*
                 *      Set the flags.
                 */
                dev->flags |= IFF_UP;

                /*
                 *      Enable NET_DMA
                 */
                net_dmaengine_get();

                /*
                 *      Initialize multicasting status
                 */
                dev_set_rx_mode(dev);

                /*
                 *      Wakeup transmit queue engine
                 */
                dev_activate(dev);

                /*
                 *      ... and announce new interface.
                 */
                call_netdevice_notifiers(NETDEV_UP, dev);
        }

        return ret;
}

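/*
 * Illustrative sketch (not in the original file): bringing an
 * interface up from kernel code. dev_open() asserts RTNL, so the
 * caller takes it first, mirroring what the ioctl and rtnetlink
 * paths do. The function name is hypothetical.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_open(dev);    /* 0 if already up, negative errno on failure */
        rtnl_unlock();
        return err;
}
#endif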
/**
 *      dev_close - shutdown an interface.
 *      @dev: device to shutdown
 *
 *      This function moves an active device into down state. A
 *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *      chain.
 */
int dev_close(struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        ASSERT_RTNL();

        might_sleep();

        if (!(dev->flags & IFF_UP))
                return 0;

        /*
         *      Tell people we are going down, so that they can
         *      prepare for the device's death while it is still operating.
         */
        call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

        clear_bit(__LINK_STATE_START, &dev->state);

        /* Synchronize to scheduled poll. We cannot touch poll list;
         * it may even be on a different cpu. So just clear netif_running().
         *
         * dev->stop() will invoke napi_disable() on all of its
         * napi_struct instances on this device.
         */
        smp_mb__after_clear_bit(); /* Commit netif_running(). */

        dev_deactivate(dev);

        /*
         *      Call the device specific close. This cannot fail.
         *      Only if device is UP
         *
         *      We allow it to be called even after a DETACH hot-plug
         *      event.
         */
        if (ops->ndo_stop)
                ops->ndo_stop(dev);

        /*
         *      Device is now down.
         */

        dev->flags &= ~IFF_UP;

        /*
         *      Tell people we are down
         */
        call_netdevice_notifiers(NETDEV_DOWN, dev);

        /*
         *      Shutdown NET_DMA
         */
        net_dmaengine_put();

        return 0;
}


/**
 *      dev_disable_lro - disable Large Receive Offload on a device
 *      @dev: device
 *
 *      Disable Large Receive Offload (LRO) on a net device.  Must be
 *      called under RTNL.  This is needed if received packets may be
 *      forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
        if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
            dev->ethtool_ops->set_flags) {
                u32 flags = dev->ethtool_ops->get_flags(dev);
                if (flags & ETH_FLAG_LRO) {
                        flags &= ~ETH_FLAG_LRO;
                        dev->ethtool_ops->set_flags(dev, flags);
                }
        }
        WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *      Device change register/unregister. These are not inline or static
 *      as we export them to the world.
 */

/**
 *      register_netdevice_notifier - register a network notifier block
 *      @nb: notifier
 *
 *      Register a notifier to be called when network device events occur.
 *      The notifier passed is linked into the kernel structures and must
 *      not be reused until it has been unregistered. A negative errno code
 *      is returned on a failure.
 *
 *      When registered, all registration and up events are replayed
 *      to the new notifier to allow it a race-free
 *      view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
        struct net_device *dev;
        struct net_device *last;
        struct net *net;
        int err;

        rtnl_lock();
        err = raw_notifier_chain_register(&netdev_chain, nb);
        if (err)
                goto unlock;
        if (dev_boot_phase)
                goto unlock;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
                        err = notifier_to_errno(err);
                        if (err)
                                goto rollback;

                        if (!(dev->flags & IFF_UP))
                                continue;

                        nb->notifier_call(nb, NETDEV_UP, dev);
                }
        }

unlock:
        rtnl_unlock();
        return err;

rollback:
        last = dev;
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
                                break;

                        if (dev->flags & IFF_UP) {
                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
                                nb->notifier_call(nb, NETDEV_DOWN, dev);
                        }
                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
                }
        }

        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
}

/**
 *      unregister_netdevice_notifier - unregister a network notifier block
 *      @nb: notifier
 *
 *      Unregister a notifier previously registered by
 *      register_netdevice_notifier(). The notifier is unlinked from the
 *      kernel structures and may then be reused. A negative errno code
 *      is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
        int err;

        rtnl_lock();
        err = raw_notifier_chain_unregister(&netdev_chain, nb);
        rtnl_unlock();
        return err;
}

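/*
 * Illustrative sketch (not in the original file): a minimal netdev
 * notifier block. On registration, the NETDEV_REGISTER/NETDEV_UP
 * replay above delivers already-existing devices to it. Names
 * prefixed "example_" are hypothetical. In this kernel the ptr
 * argument is the net_device itself.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (event == NETDEV_UP)
                printk(KERN_DEBUG "example: %s is up\n", dev->name);
        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
        .notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_notifier); */
#endif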
/**
 *      call_netdevice_notifiers - call all network notifier blocks
 *      @val: value passed unmodified to notifier function
 *      @dev: net_device pointer passed unmodified to notifier function
 *
 *      Call all network notifier blocks.  Parameters and return value
 *      are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
        return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
        atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
        atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
        if (atomic_read(&netstamp_needed))
                __net_timestamp(skb);
        else
                skb->tstamp.tv64 = 0;
}

/*
 *      Support routine. Sends outgoing frames to any network
 *      taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
        struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
        if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
                net_timestamp(skb);
#else
        net_timestamp(skb);
#endif

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                /* Never send packets back to the socket
                 * they originated from - MvS (miquels@drinkel.ow.org)
                 */
                if ((ptype->dev == dev || !ptype->dev) &&
                    (ptype->af_packet_priv == NULL ||
                     (struct sock *)ptype->af_packet_priv != skb->sk)) {
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                        if (!skb2)
                                break;

                        /* skb->nh should be correctly
                           set by sender, so that the second statement is
                           just protection against buggy protocols.
                         */
                        skb_reset_mac_header(skb2);

                        if (skb_network_header(skb2) < skb2->data ||
                            skb2->network_header > skb2->tail) {
                                if (net_ratelimit())
                                        printk(KERN_CRIT "protocol %04x is "
                                               "buggy, dev %s\n",
                                               skb2->protocol, dev->name);
                                skb_reset_network_header(skb2);
                        }

                        skb2->transport_header = skb2->network_header;
                        skb2->pkt_type = PACKET_OUTGOING;
                        ptype->func(skb2, skb->dev, ptype, skb->dev);
                }
        }
        rcu_read_unlock();
}


static inline void __netif_reschedule(struct Qdisc *q)
{
        struct softnet_data *sd;
        unsigned long flags;

        local_irq_save(flags);
        sd = &__get_cpu_var(softnet_data);
        q->next_sched = sd->output_queue;
        sd->output_queue = q;
        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
        if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
                __netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
        if (atomic_dec_and_test(&skb->users)) {
                struct softnet_data *sd;
                unsigned long flags;

                local_irq_save(flags);
                sd = &__get_cpu_var(softnet_data);
                skb->next = sd->completion_queue;
                sd->completion_queue = skb;
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
        if (in_irq() || irqs_disabled())
                dev_kfree_skb_irq(skb);
        else
                dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);

1428
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001429/**
1430 * netif_device_detach - mark device as removed
1431 * @dev: network device
1432 *
1433 * Mark device as removed from the system and therefore no longer available.
1434 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001435void netif_device_detach(struct net_device *dev)
1436{
1437 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1438 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001439 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001440 }
1441}
1442EXPORT_SYMBOL(netif_device_detach);
1443
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001444/**
1445 * netif_device_attach - mark device as attached
1446 * @dev: network device
1447 *
1448 * Mark device as attached to the system and restart it if needed.
1449 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001450void netif_device_attach(struct net_device *dev)
1451{
1452 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1453 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001454 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001455 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001456 }
1457}
1458EXPORT_SYMBOL(netif_device_attach);
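/*
 * Hypothetical suspend/resume pairing (not from this file): a PCI
 * driver would typically bracket a power transition with these calls
 * so the core stops and later wakes all tx queues:
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *netdev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(netdev);
 *		// ... save state and power down ...
 *		return 0;
 *	}
 *
 * with the matching netif_device_attach(netdev) in ->resume().
 */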
1459
Ben Hutchings6de329e2008-06-16 17:02:28 -07001460static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1461{
1462 return ((features & NETIF_F_GEN_CSUM) ||
1463 ((features & NETIF_F_IP_CSUM) &&
1464 protocol == htons(ETH_P_IP)) ||
1465 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001466 protocol == htons(ETH_P_IPV6)) ||
1467 ((features & NETIF_F_FCOE_CRC) &&
1468 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001469}
1470
1471static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1472{
1473 if (can_checksum_protocol(dev->features, skb->protocol))
1474 return true;
1475
1476 if (skb->protocol == htons(ETH_P_8021Q)) {
1477 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1478 if (can_checksum_protocol(dev->features & dev->vlan_features,
1479 veh->h_vlan_encapsulated_proto))
1480 return true;
1481 }
1482
1483 return false;
1484}
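/*
 * Worked example (hypothetical feature mix): a NIC advertising
 * NETIF_F_IP_CSUM in dev->features but nothing in dev->vlan_features
 * can checksum a plain ETH_P_IP skb, yet dev_can_checksum() returns
 * false for the same flow inside an ETH_P_8021Q frame, so the VLAN
 * path falls back to skb_checksum_help() below:
 *
 *	dev->features      = NETIF_F_IP_CSUM;
 *	dev->vlan_features = 0;
 *	// ETH_P_IP skb               -> dev_can_checksum() == true
 *	// ETH_P_8021Q skb around IP  -> dev_can_checksum() == false
 */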
Denis Vlasenko56079432006-03-29 15:57:29 -08001485
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486/*
1487 * Invalidate hardware checksum when packet is to be mangled, and
1488 * complete checksum manually on outgoing path.
1489 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001490int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491{
Al Virod3bc23e2006-11-14 21:24:49 -08001492 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001493 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494
Patrick McHardy84fa7932006-08-29 16:44:56 -07001495 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001496 goto out_set_summed;
1497
1498 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001499 /* Let GSO fix up the checksum. */
1500 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 }
1502
Herbert Xua0308472007-10-15 01:47:15 -07001503 offset = skb->csum_start - skb_headroom(skb);
1504 BUG_ON(offset >= skb_headlen(skb));
1505 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1506
1507 offset += skb->csum_offset;
1508 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1509
1510 if (skb_cloned(skb) &&
1511 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1513 if (ret)
1514 goto out;
1515 }
1516
Herbert Xua0308472007-10-15 01:47:15 -07001517 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001518out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001520out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 return ret;
1522}
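/*
 * Offset bookkeeping, with illustrative numbers: skb->csum_start is
 * measured from skb->head, so for a TCP/IPv4 frame whose transport
 * header begins 34 bytes into the linear data (14 mac + 20 ip),
 *
 *	offset = skb->csum_start - skb_headroom(skb);	// 34
 *	offset += skb->csum_offset;			// +16 for TCP
 *
 * leaves offset pointing at tcphdr->check, which is where the folded
 * sum is stored above.
 */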
1523
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001524/**
1525 * skb_gso_segment - Perform segmentation on skb.
1526 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001527 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001528 *
1529 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001530 *
1531 * It may return NULL if the skb requires no segmentation. This is
1532 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001533 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001534struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001535{
1536 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1537 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001538 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001539 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001540
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001541 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001542 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001543 __skb_pull(skb, skb->mac_len);
1544
Herbert Xu67fd1a72009-01-19 16:26:44 -08001545 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1546 struct net_device *dev = skb->dev;
1547 struct ethtool_drvinfo info = {};
1548
1549 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1550 dev->ethtool_ops->get_drvinfo(dev, &info);
1551
1552 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1553 "ip_summed=%d",
1554 info.driver, dev ? dev->features : 0L,
1555 skb->sk ? skb->sk->sk_route_caps : 0L,
1556 skb->len, skb->data_len, skb->ip_summed);
1557
Herbert Xua430a432006-07-08 13:34:56 -07001558 if (skb_header_cloned(skb) &&
1559 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1560 return ERR_PTR(err);
1561 }
1562
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001563 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001564 list_for_each_entry_rcu(ptype,
1565 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001566 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001567 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001568 err = ptype->gso_send_check(skb);
1569 segs = ERR_PTR(err);
1570 if (err || skb_gso_ok(skb, features))
1571 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001572 __skb_push(skb, (skb->data -
1573 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001574 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001575 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001576 break;
1577 }
1578 }
1579 rcu_read_unlock();
1580
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001581 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001582
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001583 return segs;
1584}
1585
1586EXPORT_SYMBOL(skb_gso_segment);
1587
Herbert Xufb286bb2005-11-10 13:01:24 -08001588/* Take action when hardware reception checksum errors are detected. */
1589#ifdef CONFIG_BUG
1590void netdev_rx_csum_fault(struct net_device *dev)
1591{
1592 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001593 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001594 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001595 dump_stack();
1596 }
1597}
1598EXPORT_SYMBOL(netdev_rx_csum_fault);
1599#endif
1600
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601/* Actually, we should eliminate this check as soon as we know that:
1602 * 1. An IOMMU is present and allows mapping all the memory.
1603 * 2. No high memory really exists on this machine.
1604 */
1605
1606static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1607{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001608#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609 int i;
1610
1611 if (dev->features & NETIF_F_HIGHDMA)
1612 return 0;
1613
1614 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1615 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1616 return 1;
1617
Herbert Xu3d3a8532006-06-27 13:33:10 -07001618#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 return 0;
1620}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001622struct dev_gso_cb {
1623 void (*destructor)(struct sk_buff *skb);
1624};
1625
1626#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1627
1628static void dev_gso_skb_destructor(struct sk_buff *skb)
1629{
1630 struct dev_gso_cb *cb;
1631
1632 do {
1633 struct sk_buff *nskb = skb->next;
1634
1635 skb->next = nskb->next;
1636 nskb->next = NULL;
1637 kfree_skb(nskb);
1638 } while (skb->next);
1639
1640 cb = DEV_GSO_CB(skb);
1641 if (cb->destructor)
1642 cb->destructor(skb);
1643}
1644
1645/**
1646 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1647 * @skb: buffer to segment
1648 *
1649 * This function segments the given skb and stores the list of segments
1650 * in skb->next.
1651 */
1652static int dev_gso_segment(struct sk_buff *skb)
1653{
1654 struct net_device *dev = skb->dev;
1655 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001656 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1657 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001658
Herbert Xu576a30e2006-06-27 13:22:38 -07001659 segs = skb_gso_segment(skb, features);
1660
1661 /* Verifying header integrity only. */
1662 if (!segs)
1663 return 0;
1664
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001665 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001666 return PTR_ERR(segs);
1667
1668 skb->next = segs;
1669 DEV_GSO_CB(skb)->destructor = skb->destructor;
1670 skb->destructor = dev_gso_skb_destructor;
1671
1672 return 0;
1673}
1674
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001675int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1676 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001677{
Stephen Hemminger00829822008-11-20 20:14:53 -08001678 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001679 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001680
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001681 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001682 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001683 dev_queue_xmit_nit(skb, dev);
1684
Herbert Xu576a30e2006-06-27 13:22:38 -07001685 if (netif_needs_gso(dev, skb)) {
1686 if (unlikely(dev_gso_segment(skb)))
1687 goto out_kfree_skb;
1688 if (skb->next)
1689 goto gso;
1690 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001691
Eric Dumazet93f154b2009-05-18 22:19:19 -07001692 /*
1693 * If the device doesn't need skb->dst, release it right now while
1694 * it's hot in this cpu's cache
1695 */
1696 if ((dev->priv_flags & IFF_XMIT_DST_RELEASE) && skb->dst) {
1697 dst_release(skb->dst);
1698 skb->dst = NULL;
1699 }
Patrick Ohlyac45f602009-02-12 05:03:37 +00001700 rc = ops->ndo_start_xmit(skb, dev);
Eric Dumazet08baf562009-05-25 22:58:01 -07001701 if (rc == 0)
1702 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001703 /*
1704 * TODO: if skb_orphan() was called by
1705 * dev->hard_start_xmit() (for example, the unmodified
1706 * igb driver does that; bnx2 doesn't), then
1707 * skb_tx_software_timestamp() will be unable to send
1708 * back the time stamp.
1709 *
1710 * How can this be prevented? Always create another
1711 * reference to the socket before calling
1712 * dev->hard_start_xmit()? Prevent that skb_orphan()
1713 * does anything in dev->hard_start_xmit() by clearing
1714 * the skb destructor before the call and restoring it
1715 * afterwards, then doing the skb_orphan() ourselves?
1716 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001717 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001718 }
1719
Herbert Xu576a30e2006-06-27 13:22:38 -07001720gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001721 do {
1722 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001723
1724 skb->next = nskb->next;
1725 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001726 rc = ops->ndo_start_xmit(nskb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001727 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001728 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001729 skb->next = nskb;
1730 return rc;
1731 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001732 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001733 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001734 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001735 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001736
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001737 skb->destructor = DEV_GSO_CB(skb)->destructor;
1738
1739out_kfree_skb:
1740 kfree_skb(skb);
1741 return 0;
1742}
1743
David S. Miller70192982009-01-27 16:34:47 -08001744static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001745
Stephen Hemminger92477442009-03-21 13:39:26 -07001746u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001747{
David S. Miller70192982009-01-27 16:34:47 -08001748 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001749
David S. Miller513de112009-05-03 14:43:10 -07001750 if (skb_rx_queue_recorded(skb)) {
1751 hash = skb_get_rx_queue(skb);
1752 while (unlikely (hash >= dev->real_num_tx_queues))
1753 hash -= dev->real_num_tx_queues;
1754 return hash;
1755 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001756
1757 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001758 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001759 else
David S. Miller70192982009-01-27 16:34:47 -08001760 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001761
David S. Miller70192982009-01-27 16:34:47 -08001762 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001763
David S. Millerb6b2fed2008-07-21 09:48:06 -07001764 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001765}
Stephen Hemminger92477442009-03-21 13:39:26 -07001766EXPORT_SYMBOL(skb_tx_hash);
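/*
 * The final scaling step maps a 32-bit hash uniformly onto
 * [0, real_num_tx_queues) without a divide. Worked example with
 * 4 tx queues and hash = 0xC0000000 (three quarters of the way up
 * the 32-bit range):
 *
 *	((u64)0xC0000000 * 4) >> 32 == 3
 *
 * so the top quarter of the hash space lands on queue 3.
 */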
David S. Miller8f0f2222008-07-15 03:47:03 -07001767
David S. Millere8a04642008-07-17 00:34:19 -07001768static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1769 struct sk_buff *skb)
1770{
Stephen Hemminger00829822008-11-20 20:14:53 -08001771 const struct net_device_ops *ops = dev->netdev_ops;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001772 u16 queue_index = 0;
1773
Stephen Hemminger00829822008-11-20 20:14:53 -08001774 if (ops->ndo_select_queue)
1775 queue_index = ops->ndo_select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001776 else if (dev->real_num_tx_queues > 1)
David S. Miller70192982009-01-27 16:34:47 -08001777 queue_index = skb_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001778
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001779 skb_set_queue_mapping(skb, queue_index);
1780 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001781}
1782
Dave Jonesd29f7492008-07-22 14:09:06 -07001783/**
1784 * dev_queue_xmit - transmit a buffer
1785 * @skb: buffer to transmit
1786 *
1787 * Queue a buffer for transmission to a network device. The caller must
1788 * have set the device and priority and built the buffer before calling
1789 * this function. The function can be called from an interrupt.
1790 *
1791 * A negative errno code is returned on a failure. A success does not
1792 * guarantee the frame will be transmitted as it may be dropped due
1793 * to congestion or traffic shaping.
1794 *
1795 * -----------------------------------------------------------------------------------
1796 * I notice this method can also return errors from the queue disciplines,
1797 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1798 * be positive.
1799 *
1800 * Regardless of the return value, the skb is consumed, so it is currently
1801 * difficult to retry a send to this method. (You can bump the ref count
1802 * before sending to hold a reference for retry if you are careful.)
1803 *
1804 * When calling this method, interrupts MUST be enabled. This is because
1805 * the BH enable code must have IRQs enabled so that it will not deadlock.
1806 * --BLG
1807 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808int dev_queue_xmit(struct sk_buff *skb)
1809{
1810 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001811 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 struct Qdisc *q;
1813 int rc = -ENOMEM;
1814
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001815 /* GSO will handle the following emulations directly. */
1816 if (netif_needs_gso(dev, skb))
1817 goto gso;
1818
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 if (skb_shinfo(skb)->frag_list &&
1820 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001821 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 goto out_kfree_skb;
1823
1824 /* Fragmented skb is linearized if device does not support SG,
1825 * or if at least one of the fragments is in highmem and the device
1826 * does not support DMA from it.
1827 */
1828 if (skb_shinfo(skb)->nr_frags &&
1829 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001830 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 goto out_kfree_skb;
1832
1833 /* If packet is not checksummed and device does not support
1834 * checksumming for this protocol, complete checksumming here.
1835 */
Herbert Xu663ead32007-04-09 11:59:07 -07001836 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1837 skb_set_transport_header(skb, skb->csum_start -
1838 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001839 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1840 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001841 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001843gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001844 /* Disable soft irqs for various locks below. Also
1845 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001847 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
David S. Millereae792b2008-07-15 03:03:33 -07001849 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001850 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001851
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852#ifdef CONFIG_NET_CLS_ACT
1853 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1854#endif
1855 if (q->enqueue) {
David S. Miller5fb66222008-08-02 20:02:43 -07001856 spinlock_t *root_lock = qdisc_lock(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857
David S. Miller37437bb2008-07-16 02:15:04 -07001858 spin_lock(root_lock);
1859
David S. Millera9312ae2008-08-17 21:51:03 -07001860 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
David S. Miller96d20312008-08-17 23:37:16 -07001861 kfree_skb(skb);
David S. Millera9312ae2008-08-17 21:51:03 -07001862 rc = NET_XMIT_DROP;
David S. Miller96d20312008-08-17 23:37:16 -07001863 } else {
1864 rc = qdisc_enqueue_root(skb, q);
1865 qdisc_run(q);
David S. Millera9312ae2008-08-17 21:51:03 -07001866 }
David S. Miller37437bb2008-07-16 02:15:04 -07001867 spin_unlock(root_lock);
1868
David S. Miller37437bb2008-07-16 02:15:04 -07001869 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 }
1871
1872 /* The device has no queue. Common case for software devices:
1873 loopback, all sorts of tunnels...
1874
Herbert Xu932ff272006-06-09 12:20:56 -07001875 Really, it is unlikely that netif_tx_lock protection is necessary
1876 here. (f.e. loopback and IP tunnels are clean ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 counters.)
1878 However, it is possible that they rely on the protection
1879 made by us here.
1880
1881 Check this and take the lock anyway. It is not prone to deadlocks.
1882 Taking it for a noqueue qdisc is even simpler 8)
1883 */
1884 if (dev->flags & IFF_UP) {
1885 int cpu = smp_processor_id(); /* ok because BHs are off */
1886
David S. Millerc773e842008-07-08 23:13:53 -07001887 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888
David S. Millerc773e842008-07-08 23:13:53 -07001889 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001891 if (!netif_tx_queue_stopped(txq)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 rc = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001893 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001894 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 goto out;
1896 }
1897 }
David S. Millerc773e842008-07-08 23:13:53 -07001898 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 if (net_ratelimit())
1900 printk(KERN_CRIT "Virtual device %s asks to "
1901 "queue packet!\n", dev->name);
1902 } else {
1903 /* Recursion is detected! It is possible,
1904 * unfortunately */
1905 if (net_ratelimit())
1906 printk(KERN_CRIT "Dead loop on virtual device "
1907 "%s, fix it urgently!\n", dev->name);
1908 }
1909 }
1910
1911 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001912 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
1914out_kfree_skb:
1915 kfree_skb(skb);
1916 return rc;
1917out:
Herbert Xud4828d82006-06-22 02:28:18 -07001918 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 return rc;
1920}
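/*
 * Minimal caller sketch (hypothetical, not from this file): a protocol
 * that has built an skb and resolved the output device queues it as
 *
 *	skb->dev = dev;
 *	skb->priority = sk ? sk->sk_priority : 0;
 *	rc = dev_queue_xmit(skb);
 *	// skb is consumed regardless of rc; never touch it again
 *
 * Since rc may be a positive NET_XMIT_* code from the qdisc as well as
 * a negative errno, callers usually treat any nonzero value as "not
 * obviously sent" rather than retrying.
 */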
1921
1922
1923/*=======================================================================
1924 Receiver routines
1925 =======================================================================*/
1926
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001927int netdev_max_backlog __read_mostly = 1000;
1928int netdev_budget __read_mostly = 300;
1929int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
1931DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1932
1933
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934/**
1935 * netif_rx - post buffer to the network code
1936 * @skb: buffer to post
1937 *
1938 * This function receives a packet from a device driver and queues it for
1939 * the upper (protocol) levels to process. It always succeeds. The buffer
1940 * may be dropped during processing for congestion control or by the
1941 * protocol layers.
1942 *
1943 * return values:
1944 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 * NET_RX_DROP (packet was dropped)
1946 *
1947 */
1948
1949int netif_rx(struct sk_buff *skb)
1950{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 struct softnet_data *queue;
1952 unsigned long flags;
1953
1954 /* if netpoll wants it, pretend we never saw it */
1955 if (netpoll_rx(skb))
1956 return NET_RX_DROP;
1957
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001958 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001959 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
1961 /*
1962 * The code is rearranged so that the path is shortest
1963 * when the CPU is congested but still operating.
1964 */
1965 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 queue = &__get_cpu_var(softnet_data);
1967
1968 __get_cpu_var(netdev_rx_stat).total++;
1969 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1970 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001974 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 }
1976
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001977 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 goto enqueue;
1979 }
1980
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 __get_cpu_var(netdev_rx_stat).dropped++;
1982 local_irq_restore(flags);
1983
1984 kfree_skb(skb);
1985 return NET_RX_DROP;
1986}
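/*
 * Hypothetical driver-side sketch: a non-NAPI driver hands a freshly
 * built skb to the stack from its rx interrupt handler:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);	// queues on this CPU's backlog, never blocks
 *
 * The return value only reports congestion; the skb must not be
 * reused either way.
 */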
1987
1988int netif_rx_ni(struct sk_buff *skb)
1989{
1990 int err;
1991
1992 preempt_disable();
1993 err = netif_rx(skb);
1994 if (local_softirq_pending())
1995 do_softirq();
1996 preempt_enable();
1997
1998 return err;
1999}
2000
2001EXPORT_SYMBOL(netif_rx_ni);
2002
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003static void net_tx_action(struct softirq_action *h)
2004{
2005 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2006
2007 if (sd->completion_queue) {
2008 struct sk_buff *clist;
2009
2010 local_irq_disable();
2011 clist = sd->completion_queue;
2012 sd->completion_queue = NULL;
2013 local_irq_enable();
2014
2015 while (clist) {
2016 struct sk_buff *skb = clist;
2017 clist = clist->next;
2018
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002019 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 __kfree_skb(skb);
2021 }
2022 }
2023
2024 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002025 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
2027 local_irq_disable();
2028 head = sd->output_queue;
2029 sd->output_queue = NULL;
2030 local_irq_enable();
2031
2032 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002033 struct Qdisc *q = head;
2034 spinlock_t *root_lock;
2035
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 head = head->next_sched;
2037
David S. Miller5fb66222008-08-02 20:02:43 -07002038 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002039 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002040 smp_mb__before_clear_bit();
2041 clear_bit(__QDISC_STATE_SCHED,
2042 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002043 qdisc_run(q);
2044 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002046 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002047 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002048 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002049 } else {
2050 smp_mb__before_clear_bit();
2051 clear_bit(__QDISC_STATE_SCHED,
2052 &q->state);
2053 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 }
2055 }
2056 }
2057}
2058
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002059static inline int deliver_skb(struct sk_buff *skb,
2060 struct packet_type *pt_prev,
2061 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062{
2063 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002064 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065}
2066
2067#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Stephen Hemminger6229e362007-03-21 13:38:47 -07002068/* These hooks are defined here for ATM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069struct net_bridge;
2070struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
2071 unsigned char *addr);
Stephen Hemminger6229e362007-03-21 13:38:47 -07002072void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
Stephen Hemminger6229e362007-03-21 13:38:47 -07002074/*
2075 * If the bridge module is loaded, call the bridging hook.
2076 * Returns NULL if the packet was consumed.
2077 */
2078struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2079 struct sk_buff *skb) __read_mostly;
2080static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2081 struct packet_type **pt_prev, int *ret,
2082 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083{
2084 struct net_bridge_port *port;
2085
Stephen Hemminger6229e362007-03-21 13:38:47 -07002086 if (skb->pkt_type == PACKET_LOOPBACK ||
2087 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2088 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089
2090 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002091 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002093 }
2094
Stephen Hemminger6229e362007-03-21 13:38:47 -07002095 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096}
2097#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002098#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099#endif
2100
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002101#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2102struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2103EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2104
2105static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2106 struct packet_type **pt_prev,
2107 int *ret,
2108 struct net_device *orig_dev)
2109{
2110 if (skb->dev->macvlan_port == NULL)
2111 return skb;
2112
2113 if (*pt_prev) {
2114 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2115 *pt_prev = NULL;
2116 }
2117 return macvlan_handle_frame_hook(skb);
2118}
2119#else
2120#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2121#endif
2122
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123#ifdef CONFIG_NET_CLS_ACT
2124/* TODO: Maybe we should just force sch_ingress to be compiled in
2125 * when CONFIG_NET_CLS_ACT is? Otherwise we waste a few instructions
2126 * (a compare and 2 extra stores) right now if we don't have it on
2127 * but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002128 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 * the ingress scheduler, you just can't add policies on ingress.
2130 *
2131 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002132static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002135 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002136 struct netdev_queue *rxq;
2137 int result = TC_ACT_OK;
2138 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002139
Herbert Xuf697c3e2007-10-14 00:38:47 -07002140 if (MAX_RED_LOOP < ttl++) {
2141 printk(KERN_WARNING
2142 "Redir loop detected Dropping packet (%d->%d)\n",
2143 skb->iif, dev->ifindex);
2144 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 }
2146
Herbert Xuf697c3e2007-10-14 00:38:47 -07002147 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2148 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2149
David S. Miller555353c2008-07-08 17:33:13 -07002150 rxq = &dev->rx_queue;
2151
David S. Miller83874002008-07-17 00:53:03 -07002152 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002153 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002154 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002155 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2156 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002157 spin_unlock(qdisc_lock(q));
2158 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002159
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 return result;
2161}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002162
2163static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2164 struct packet_type **pt_prev,
2165 int *ret, struct net_device *orig_dev)
2166{
David S. Miller8d50b532008-07-30 02:37:46 -07002167 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002168 goto out;
2169
2170 if (*pt_prev) {
2171 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2172 *pt_prev = NULL;
2173 } else {
2174 /* Huh? Why does turning on AF_PACKET affect this? */
2175 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2176 }
2177
2178 switch (ing_filter(skb)) {
2179 case TC_ACT_SHOT:
2180 case TC_ACT_STOLEN:
2181 kfree_skb(skb);
2182 return NULL;
2183 }
2184
2185out:
2186 skb->tc_verd = 0;
2187 return skb;
2188}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189#endif
2190
Patrick McHardybc1d0412008-07-14 22:49:30 -07002191/*
2192 * netif_nit_deliver - deliver received packets to network taps
2193 * @skb: buffer
2194 *
2195 * This function is used to deliver incoming packets to network
2196 * taps. It should be used when the normal netif_receive_skb path
2197 * is bypassed, for example because of VLAN acceleration.
2198 */
2199void netif_nit_deliver(struct sk_buff *skb)
2200{
2201 struct packet_type *ptype;
2202
2203 if (list_empty(&ptype_all))
2204 return;
2205
2206 skb_reset_network_header(skb);
2207 skb_reset_transport_header(skb);
2208 skb->mac_len = skb->network_header - skb->mac_header;
2209
2210 rcu_read_lock();
2211 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2212 if (!ptype->dev || ptype->dev == skb->dev)
2213 deliver_skb(skb, ptype, skb->dev);
2214 }
2215 rcu_read_unlock();
2216}
2217
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002218/**
2219 * netif_receive_skb - process receive buffer from network
2220 * @skb: buffer to process
2221 *
2222 * netif_receive_skb() is the main receive data processing function.
2223 * It always succeeds. The buffer may be dropped during processing
2224 * for congestion control or by the protocol layers.
2225 *
2226 * This function may only be called from softirq context and interrupts
2227 * should be enabled.
2228 *
2229 * Return values (usually ignored):
2230 * NET_RX_SUCCESS: no congestion
2231 * NET_RX_DROP: packet was dropped
2232 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233int netif_receive_skb(struct sk_buff *skb)
2234{
2235 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002236 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002237 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002239 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002241 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2242 return NET_RX_SUCCESS;
2243
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002245 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 return NET_RX_DROP;
2247
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002248 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002249 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250
Patrick McHardyc01003c2007-03-29 11:46:52 -07002251 if (!skb->iif)
2252 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002253
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002254 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002255 orig_dev = skb->dev;
2256 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002257 if (skb_bond_should_drop(skb))
2258 null_or_orig = orig_dev; /* deliver only exact match */
2259 else
2260 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002261 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002262
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 __get_cpu_var(netdev_rx_stat).total++;
2264
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002265 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002266 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002267 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268
2269 pt_prev = NULL;
2270
2271 rcu_read_lock();
2272
2273#ifdef CONFIG_NET_CLS_ACT
2274 if (skb->tc_verd & TC_NCLS) {
2275 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2276 goto ncls;
2277 }
2278#endif
2279
2280 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002281 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2282 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002283 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002284 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 pt_prev = ptype;
2286 }
2287 }
2288
2289#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002290 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2291 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293ncls:
2294#endif
2295
Stephen Hemminger6229e362007-03-21 13:38:47 -07002296 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2297 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002299 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2300 if (!skb)
2301 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302
Herbert Xu9a279bc2009-02-04 16:55:27 -08002303 skb_orphan(skb);
2304
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002306 list_for_each_entry_rcu(ptype,
2307 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002309 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2310 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002311 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002312 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 pt_prev = ptype;
2314 }
2315 }
2316
2317 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002318 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 } else {
2320 kfree_skb(skb);
2321 /* Jamal, now you will not be able to escape explaining
2322 * to me how you were going to use this. :-)
2323 */
2324 ret = NET_RX_DROP;
2325 }
2326
2327out:
2328 rcu_read_unlock();
2329 return ret;
2330}
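/*
 * Sketch (hypothetical) of a tap that the ptype_all loop above would
 * visit via deliver_skb():
 *
 *	static struct packet_type example_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = example_rcv,
 *	};
 *	dev_add_pack(&example_tap);
 *
 * The pt_prev bookkeeping defers each delivery by one iteration so the
 * last matching handler can consume the caller's reference without an
 * extra atomic_inc().
 */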
2331
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002332/* Network device is going away, flush any packets still pending */
2333static void flush_backlog(void *arg)
2334{
2335 struct net_device *dev = arg;
2336 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2337 struct sk_buff *skb, *tmp;
2338
2339 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2340 if (skb->dev == dev) {
2341 __skb_unlink(skb, &queue->input_pkt_queue);
2342 kfree_skb(skb);
2343 }
2344}
2345
Herbert Xud565b0a2008-12-15 23:38:52 -08002346static int napi_gro_complete(struct sk_buff *skb)
2347{
2348 struct packet_type *ptype;
2349 __be16 type = skb->protocol;
2350 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2351 int err = -ENOENT;
2352
Herbert Xufc59f9a2009-04-14 15:11:06 -07002353 if (NAPI_GRO_CB(skb)->count == 1) {
2354 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002355 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002356 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002357
2358 rcu_read_lock();
2359 list_for_each_entry_rcu(ptype, head, list) {
2360 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2361 continue;
2362
2363 err = ptype->gro_complete(skb);
2364 break;
2365 }
2366 rcu_read_unlock();
2367
2368 if (err) {
2369 WARN_ON(&ptype->list == head);
2370 kfree_skb(skb);
2371 return NET_RX_SUCCESS;
2372 }
2373
2374out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002375 return netif_receive_skb(skb);
2376}
2377
2378void napi_gro_flush(struct napi_struct *napi)
2379{
2380 struct sk_buff *skb, *next;
2381
2382 for (skb = napi->gro_list; skb; skb = next) {
2383 next = skb->next;
2384 skb->next = NULL;
2385 napi_gro_complete(skb);
2386 }
2387
Herbert Xu4ae55442009-02-08 18:00:36 +00002388 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002389 napi->gro_list = NULL;
2390}
2391EXPORT_SYMBOL(napi_gro_flush);
2392
Herbert Xu86911732009-01-29 14:19:50 +00002393void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
2394{
2395 unsigned int offset = skb_gro_offset(skb);
2396
2397 hlen += offset;
Herbert Xuedbd9e32009-04-27 05:44:29 -07002398 if (unlikely(skb_headlen(skb) ||
2399 skb_shinfo(skb)->frags[0].size < hlen ||
Herbert Xu86911732009-01-29 14:19:50 +00002400 PageHighMem(skb_shinfo(skb)->frags[0].page)))
2401 return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
2402
2403 return page_address(skb_shinfo(skb)->frags[0].page) +
Herbert Xuedbd9e32009-04-27 05:44:29 -07002404 skb_shinfo(skb)->frags[0].page_offset + offset;
Herbert Xu86911732009-01-29 14:19:50 +00002405}
2406EXPORT_SYMBOL(skb_gro_header);
2407
Herbert Xu96e93ea2009-01-06 10:49:34 -08002408int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002409{
2410 struct sk_buff **pp = NULL;
2411 struct packet_type *ptype;
2412 __be16 type = skb->protocol;
2413 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002414 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002415 int mac_len;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002416 int ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002417
2418 if (!(skb->dev->features & NETIF_F_GRO))
2419 goto normal;
2420
Herbert Xuf17f5c92009-01-14 14:36:12 -08002421 if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
2422 goto normal;
2423
Herbert Xud565b0a2008-12-15 23:38:52 -08002424 rcu_read_lock();
2425 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002426 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2427 continue;
2428
Herbert Xu86911732009-01-29 14:19:50 +00002429 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002430 mac_len = skb->network_header - skb->mac_header;
2431 skb->mac_len = mac_len;
2432 NAPI_GRO_CB(skb)->same_flow = 0;
2433 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002434 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002435
Herbert Xud565b0a2008-12-15 23:38:52 -08002436 pp = ptype->gro_receive(&napi->gro_list, skb);
2437 break;
2438 }
2439 rcu_read_unlock();
2440
2441 if (&ptype->list == head)
2442 goto normal;
2443
Herbert Xu0da2afd52008-12-26 14:57:42 -08002444 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002445 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002446
Herbert Xud565b0a2008-12-15 23:38:52 -08002447 if (pp) {
2448 struct sk_buff *nskb = *pp;
2449
2450 *pp = nskb->next;
2451 nskb->next = NULL;
2452 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002453 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002454 }
2455
Herbert Xu0da2afd52008-12-26 14:57:42 -08002456 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002457 goto ok;
2458
Herbert Xu4ae55442009-02-08 18:00:36 +00002459 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002460 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002461
Herbert Xu4ae55442009-02-08 18:00:36 +00002462 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002463 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002464 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002465 skb->next = napi->gro_list;
2466 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002467 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002468
Herbert Xuad0f9902009-02-01 01:24:55 -08002469pull:
2470 if (unlikely(!pskb_may_pull(skb, skb_gro_offset(skb)))) {
2471 if (napi->gro_list == skb)
2472 napi->gro_list = skb->next;
2473 ret = GRO_DROP;
2474 }
2475
Herbert Xud565b0a2008-12-15 23:38:52 -08002476ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002477 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002478
2479normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002480 ret = GRO_NORMAL;
2481 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002482}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002483EXPORT_SYMBOL(dev_gro_receive);
2484
2485static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2486{
2487 struct sk_buff *p;
2488
Herbert Xud1c76af2009-03-16 10:50:02 -07002489 if (netpoll_rx_on(skb))
2490 return GRO_NORMAL;
2491
Herbert Xu96e93ea2009-01-06 10:49:34 -08002492 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002493 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2494 && !compare_ether_header(skb_mac_header(p),
2495 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002496 NAPI_GRO_CB(p)->flush = 0;
2497 }
2498
2499 return dev_gro_receive(napi, skb);
2500}
Herbert Xu5d38a072009-01-04 16:13:40 -08002501
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002502int napi_skb_finish(int ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002503{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002504 int err = NET_RX_SUCCESS;
2505
2506 switch (ret) {
2507 case GRO_NORMAL:
Herbert Xu5d38a072009-01-04 16:13:40 -08002508 return netif_receive_skb(skb);
2509
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002510 case GRO_DROP:
2511 err = NET_RX_DROP;
2512 /* fall through */
2513
2514 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002515 kfree_skb(skb);
2516 break;
2517 }
2518
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002519 return err;
2520}
2521EXPORT_SYMBOL(napi_skb_finish);
2522
2523int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2524{
Herbert Xu86911732009-01-29 14:19:50 +00002525 skb_gro_reset_offset(skb);
2526
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002527 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002528}
2529EXPORT_SYMBOL(napi_gro_receive);
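/*
 * Hypothetical NAPI poll sketch: a driver feeds received frames
 * through GRO instead of calling netif_receive_skb() directly:
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *		struct sk_buff *skb;
 *
 *		while (work < budget && (skb = example_next_rx_skb())) {
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete(napi);	// flushes napi->gro_list
 *		return work;
 *	}
 */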
2530
Herbert Xu96e93ea2009-01-06 10:49:34 -08002531void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2532{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002533 __skb_pull(skb, skb_headlen(skb));
2534 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2535
2536 napi->skb = skb;
2537}
2538EXPORT_SYMBOL(napi_reuse_skb);
2539
Herbert Xu76620aa2009-04-16 02:02:07 -07002540struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002541{
2542 struct net_device *dev = napi->dev;
2543 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002544
2545 if (!skb) {
2546 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2547 if (!skb)
2548 goto out;
2549
2550 skb_reserve(skb, NET_IP_ALIGN);
Herbert Xu76620aa2009-04-16 02:02:07 -07002551
2552 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002553 }
2554
Herbert Xu96e93ea2009-01-06 10:49:34 -08002555out:
2556 return skb;
2557}
Herbert Xu76620aa2009-04-16 02:02:07 -07002558EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002559
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002560int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2561{
2562 int err = NET_RX_SUCCESS;
2563
2564 switch (ret) {
2565 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002566 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002567 skb->protocol = eth_type_trans(skb, napi->dev);
2568
2569 if (ret == GRO_NORMAL)
2570 return netif_receive_skb(skb);
2571
2572 skb_gro_pull(skb, -ETH_HLEN);
2573 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002574
2575 case GRO_DROP:
2576 err = NET_RX_DROP;
2577 /* fall through */
2578
2579 case GRO_MERGED_FREE:
2580 napi_reuse_skb(napi, skb);
2581 break;
2582 }
2583
2584 return err;
2585}
2586EXPORT_SYMBOL(napi_frags_finish);
2587
Herbert Xu76620aa2009-04-16 02:02:07 -07002588struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002589{
Herbert Xu76620aa2009-04-16 02:02:07 -07002590 struct sk_buff *skb = napi->skb;
2591 struct ethhdr *eth;
2592
2593 napi->skb = NULL;
2594
2595 skb_reset_mac_header(skb);
2596 skb_gro_reset_offset(skb);
2597
2598 eth = skb_gro_header(skb, sizeof(*eth));
2599 if (!eth) {
2600 napi_reuse_skb(napi, skb);
2601 skb = NULL;
2602 goto out;
2603 }
2604
2605 skb_gro_pull(skb, sizeof(*eth));
2606
2607 /*
2608 * This works because the only protocols we care about don't require
2609 * special handling. We'll fix it up properly at the end.
2610 */
2611 skb->protocol = eth->h_proto;
2612
2613out:
2614 return skb;
2615}
2616EXPORT_SYMBOL(napi_frags_skb);
2617
2618int napi_gro_frags(struct napi_struct *napi)
2619{
2620 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002621
2622 if (!skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002623 return NET_RX_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002624
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002625 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002626}
2627EXPORT_SYMBOL(napi_gro_frags);
2628
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002629static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630{
2631 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2633 unsigned long start_time = jiffies;
2634
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002635 napi->weight = weight_p;
2636 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638
2639 local_irq_disable();
2640 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002641 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002642 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002643 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002644 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002645 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 local_irq_enable();
2647
Herbert Xu8f1ead22009-03-26 00:59:10 -07002648 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002649 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002651 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652}
2653
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002654/**
2655 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002656 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002657 *
2658 * The entry's receive function will be scheduled to run
2659 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002660void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002661{
2662 unsigned long flags;
2663
2664 local_irq_save(flags);
2665 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2666 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2667 local_irq_restore(flags);
2668}
2669EXPORT_SYMBOL(__napi_schedule);
2670
Herbert Xud565b0a2008-12-15 23:38:52 -08002671void __napi_complete(struct napi_struct *n)
2672{
2673 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2674 BUG_ON(n->gro_list);
2675
2676 list_del(&n->poll_list);
2677 smp_mb__before_clear_bit();
2678 clear_bit(NAPI_STATE_SCHED, &n->state);
2679}
2680EXPORT_SYMBOL(__napi_complete);
2681
2682void napi_complete(struct napi_struct *n)
2683{
2684 unsigned long flags;
2685
2686 /*
2687 * don't let napi dequeue from the cpu poll list
2688 * just in case it's running on a different cpu
2689 */
2690 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2691 return;
2692
2693 napi_gro_flush(n);
2694 local_irq_save(flags);
2695 __napi_complete(n);
2696 local_irq_restore(flags);
2697}
2698EXPORT_SYMBOL(napi_complete);
2699
2700void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2701 int (*poll)(struct napi_struct *, int), int weight)
2702{
2703 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002704 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002705 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002706 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002707 napi->poll = poll;
2708 napi->weight = weight;
2709 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002710 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002711#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002712 spin_lock_init(&napi->poll_lock);
2713 napi->poll_owner = -1;
2714#endif
2715 set_bit(NAPI_STATE_SCHED, &napi->state);
2716}
2717EXPORT_SYMBOL(netif_napi_add);
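/*
 * Typical (hypothetical) driver pairing: register the context once in
 * probe, then schedule from the rx interrupt:
 *
 *	netif_napi_add(netdev, &priv->napi, example_poll, 64);
 *	...
 *	if (napi_schedule_prep(&priv->napi)) {
 *		example_disable_rx_irq(priv);	// assumed helper
 *		__napi_schedule(&priv->napi);
 *	}
 */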
2718
2719void netif_napi_del(struct napi_struct *napi)
2720{
2721 struct sk_buff *skb, *next;
2722
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002723 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002724 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002725
2726 for (skb = napi->gro_list; skb; skb = next) {
2727 next = skb->next;
2728 skb->next = NULL;
2729 kfree_skb(skb);
2730 }
2731
2732 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002733 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002734}
2735EXPORT_SYMBOL(netif_napi_del);
2736
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002737
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738static void net_rx_action(struct softirq_action *h)
2739{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002740 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002741 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002742 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002743 void *have;
2744
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 local_irq_disable();
2746
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002747 while (!list_empty(list)) {
2748 struct napi_struct *n;
2749 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002751		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002752		 * Allow this to run for 2 jiffies, which allows
2753		 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002754 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002755 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 goto softnet_break;
2757
2758 local_irq_enable();
2759
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002760 /* Even though interrupts have been re-enabled, this
2761 * access is safe because interrupts can only add new
2762 * entries to the tail of this list, and only ->poll()
2763 * calls can remove this head entry from the list.
2764 */
2765 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002767 have = netpoll_poll_lock(n);
2768
2769 weight = n->weight;
2770
David S. Miller0a7606c2007-10-29 21:28:47 -07002771 /* This NAPI_STATE_SCHED test is for avoiding a race
2772 * with netpoll's poll_napi(). Only the entity which
2773 * obtains the lock and sees NAPI_STATE_SCHED set will
2774 * actually make the ->poll() call. Therefore we avoid
2775	 * accidentally calling ->poll() when NAPI is not scheduled.
2776 */
2777 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002778 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002779 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002780 trace_napi_poll(n);
2781 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002782
2783 WARN_ON_ONCE(work > weight);
2784
2785 budget -= work;
2786
2787 local_irq_disable();
2788
2789 /* Drivers must not modify the NAPI state if they
2790 * consume the entire weight. In such cases this code
2791 * still "owns" the NAPI instance and therefore can
2792 * move the instance around on the list at-will.
2793 */
David S. Millerfed17f32008-01-07 21:00:40 -08002794 if (unlikely(work == weight)) {
2795 if (unlikely(napi_disable_pending(n)))
2796 __napi_complete(n);
2797 else
2798 list_move_tail(&n->poll_list, list);
2799 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002800
2801 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 }
2803out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002804 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002805
Chris Leechdb217332006-06-17 21:24:58 -07002806#ifdef CONFIG_NET_DMA
2807 /*
2808 * There may not be any more sk_buffs coming right now, so push
2809 * any pending DMA copies to hardware
2810 */
Dan Williams2ba05622009-01-06 11:38:14 -07002811 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002812#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002813
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 return;
2815
2816softnet_break:
2817 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2818 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2819 goto out;
2820}
2821
2822static gifconf_func_t *gifconf_list[NPROTO];
2823
2824/**
2825 * register_gifconf - register a SIOCGIFCONF handler
2826 * @family: Address family
2827 * @gifconf: Function handler
2828 *
2829 * Register protocol dependent address dumping routines. The handler
2830 * that is passed must not be freed or reused until it has been replaced
2831 * by another handler.
2832 */
2833int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2834{
2835 if (family >= NPROTO)
2836 return -EINVAL;
2837 gifconf_list[family] = gifconf;
2838 return 0;
2839}
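
/*
 * Example: IPv4 registers its address-dumping handler roughly like this
 * (see devinet.c):
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */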
2840
2841
2842/*
2843 * Map an interface index to its name (SIOCGIFNAME)
2844 */
2845
2846/*
2847 * We need this ioctl for efficient implementation of the
2848 * if_indextoname() function required by the IPv6 API. Without
2849 * it, we would have to search all the interfaces to find a
2850 * match. --pb
2851 */
2852
Eric W. Biederman881d9662007-09-17 11:56:21 -07002853static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854{
2855 struct net_device *dev;
2856 struct ifreq ifr;
2857
2858 /*
2859 * Fetch the caller's info block.
2860 */
2861
2862 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2863 return -EFAULT;
2864
2865 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002866 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 if (!dev) {
2868 read_unlock(&dev_base_lock);
2869 return -ENODEV;
2870 }
2871
2872 strcpy(ifr.ifr_name, dev->name);
2873 read_unlock(&dev_base_lock);
2874
2875 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2876 return -EFAULT;
2877 return 0;
2878}
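
/*
 * Example (editorial sketch): the userspace side of SIOCGIFNAME, which
 * is how if_indextoname() can be implemented:
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = idx;
 *	if (ioctl(sock, SIOCGIFNAME, &ifr) == 0)
 *		printf("index %d is %s\n", idx, ifr.ifr_name);
 */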
2879
2880/*
2881 * Perform a SIOCGIFCONF call. This structure will change
2882 * size eventually, and there is nothing I can do about it.
2883 * Thus we will need a 'compatibility mode'.
2884 */
2885
Eric W. Biederman881d9662007-09-17 11:56:21 -07002886static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887{
2888 struct ifconf ifc;
2889 struct net_device *dev;
2890 char __user *pos;
2891 int len;
2892 int total;
2893 int i;
2894
2895 /*
2896 * Fetch the caller's info block.
2897 */
2898
2899 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2900 return -EFAULT;
2901
2902 pos = ifc.ifc_buf;
2903 len = ifc.ifc_len;
2904
2905 /*
2906 * Loop over the interfaces, and write an info block for each.
2907 */
2908
2909 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002910 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 for (i = 0; i < NPROTO; i++) {
2912 if (gifconf_list[i]) {
2913 int done;
2914 if (!pos)
2915 done = gifconf_list[i](dev, NULL, 0);
2916 else
2917 done = gifconf_list[i](dev, pos + total,
2918 len - total);
2919 if (done < 0)
2920 return -EFAULT;
2921 total += done;
2922 }
2923 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002924 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925
2926 /*
2927 * All done. Write the updated control block back to the caller.
2928 */
2929 ifc.ifc_len = total;
2930
2931 /*
2932 * Both BSD and Solaris return 0 here, so we do too.
2933 */
2934 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2935}
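
/*
 * Example (editorial sketch): userspace commonly sizes its buffer by
 * calling SIOCGIFCONF twice; a NULL ifc_buf takes the !pos branch above
 * and only computes the length:
 *
 *	struct ifconf ifc;
 *
 *	ifc.ifc_len = 0;
 *	ifc.ifc_buf = NULL;
 *	ioctl(sock, SIOCGIFCONF, &ifc);    - learns the required ifc_len
 *	ifc.ifc_buf = malloc(ifc.ifc_len);
 *	ioctl(sock, SIOCGIFCONF, &ifc);    - fills the buffer
 */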
2936
2937#ifdef CONFIG_PROC_FS
2938/*
2939 * This is invoked by the /proc filesystem handler to display a device
2940 * in detail.
2941 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002943 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944{
Denis V. Luneve372c412007-11-19 22:31:54 -08002945 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002946 loff_t off;
2947 struct net_device *dev;
2948
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002950 if (!*pos)
2951 return SEQ_START_TOKEN;
2952
2953 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002954 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002955 if (off++ == *pos)
2956 return dev;
2957
2958 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959}
2960
2961void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2962{
Denis V. Luneve372c412007-11-19 22:31:54 -08002963 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002965 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002966 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967}
2968
2969void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002970 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971{
2972 read_unlock(&dev_base_lock);
2973}
2974
2975static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2976{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08002977 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002978
Rusty Russell5a1b5892007-04-28 21:04:03 -07002979 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2980 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2981 dev->name, stats->rx_bytes, stats->rx_packets,
2982 stats->rx_errors,
2983 stats->rx_dropped + stats->rx_missed_errors,
2984 stats->rx_fifo_errors,
2985 stats->rx_length_errors + stats->rx_over_errors +
2986 stats->rx_crc_errors + stats->rx_frame_errors,
2987 stats->rx_compressed, stats->multicast,
2988 stats->tx_bytes, stats->tx_packets,
2989 stats->tx_errors, stats->tx_dropped,
2990 stats->tx_fifo_errors, stats->collisions,
2991 stats->tx_carrier_errors +
2992 stats->tx_aborted_errors +
2993 stats->tx_window_errors +
2994 stats->tx_heartbeat_errors,
2995 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996}
2997
2998/*
2999 * Called from the PROCfs module. This now uses the new arbitrary sized
3000 * /proc/net interface to create /proc/net/dev
3001 */
3002static int dev_seq_show(struct seq_file *seq, void *v)
3003{
3004 if (v == SEQ_START_TOKEN)
3005 seq_puts(seq, "Inter-| Receive "
3006 " | Transmit\n"
3007 " face |bytes packets errs drop fifo frame "
3008 "compressed multicast|bytes packets errs "
3009 "drop fifo colls carrier compressed\n");
3010 else
3011 dev_seq_printf_stats(seq, v);
3012 return 0;
3013}
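
/*
 * For reference, the header plus one line of (illustrative, made-up)
 * counters as rendered by the code above:
 *
 * Inter-|   Receive                            ...| Transmit        ...
 *  face |bytes    packets errs drop fifo frame ...|bytes    packets ...
 *     lo:  633409    5829    0    0    0     0 ...|  633409    5829 ...
 */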
3014
3015static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3016{
3017 struct netif_rx_stats *rc = NULL;
3018
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003019 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003020 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003021 rc = &per_cpu(netdev_rx_stat, *pos);
3022 break;
3023 } else
3024 ++*pos;
3025 return rc;
3026}
3027
3028static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3029{
3030 return softnet_get_online(pos);
3031}
3032
3033static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3034{
3035 ++*pos;
3036 return softnet_get_online(pos);
3037}
3038
3039static void softnet_seq_stop(struct seq_file *seq, void *v)
3040{
3041}
3042
3043static int softnet_seq_show(struct seq_file *seq, void *v)
3044{
3045 struct netif_rx_stats *s = v;
3046
3047 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003048 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003049 0, 0, 0, 0, /* was fastroute */
3050		   s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051 return 0;
3052}
3053
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003054static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 .start = dev_seq_start,
3056 .next = dev_seq_next,
3057 .stop = dev_seq_stop,
3058 .show = dev_seq_show,
3059};
3060
3061static int dev_seq_open(struct inode *inode, struct file *file)
3062{
Denis V. Luneve372c412007-11-19 22:31:54 -08003063 return seq_open_net(inode, file, &dev_seq_ops,
3064 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065}
3066
Arjan van de Ven9a321442007-02-12 00:55:35 -08003067static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068 .owner = THIS_MODULE,
3069 .open = dev_seq_open,
3070 .read = seq_read,
3071 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003072 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073};
3074
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003075static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076 .start = softnet_seq_start,
3077 .next = softnet_seq_next,
3078 .stop = softnet_seq_stop,
3079 .show = softnet_seq_show,
3080};
3081
3082static int softnet_seq_open(struct inode *inode, struct file *file)
3083{
3084 return seq_open(file, &softnet_seq_ops);
3085}
3086
Arjan van de Ven9a321442007-02-12 00:55:35 -08003087static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 .owner = THIS_MODULE,
3089 .open = softnet_seq_open,
3090 .read = seq_read,
3091 .llseek = seq_lseek,
3092 .release = seq_release,
3093};
3094
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003095static void *ptype_get_idx(loff_t pos)
3096{
3097 struct packet_type *pt = NULL;
3098 loff_t i = 0;
3099 int t;
3100
3101 list_for_each_entry_rcu(pt, &ptype_all, list) {
3102 if (i == pos)
3103 return pt;
3104 ++i;
3105 }
3106
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003107 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003108 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3109 if (i == pos)
3110 return pt;
3111 ++i;
3112 }
3113 }
3114 return NULL;
3115}
3116
3117static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003118 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003119{
3120 rcu_read_lock();
3121 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3122}
3123
3124static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3125{
3126 struct packet_type *pt;
3127 struct list_head *nxt;
3128 int hash;
3129
3130 ++*pos;
3131 if (v == SEQ_START_TOKEN)
3132 return ptype_get_idx(0);
3133
3134 pt = v;
3135 nxt = pt->list.next;
3136 if (pt->type == htons(ETH_P_ALL)) {
3137 if (nxt != &ptype_all)
3138 goto found;
3139 hash = 0;
3140 nxt = ptype_base[0].next;
3141 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003142 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003143
3144 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003145 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003146 return NULL;
3147 nxt = ptype_base[hash].next;
3148 }
3149found:
3150 return list_entry(nxt, struct packet_type, list);
3151}
3152
3153static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003154 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003155{
3156 rcu_read_unlock();
3157}
3158
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003159static int ptype_seq_show(struct seq_file *seq, void *v)
3160{
3161 struct packet_type *pt = v;
3162
3163 if (v == SEQ_START_TOKEN)
3164 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003165 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003166 if (pt->type == htons(ETH_P_ALL))
3167 seq_puts(seq, "ALL ");
3168 else
3169 seq_printf(seq, "%04x", ntohs(pt->type));
3170
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003171 seq_printf(seq, " %-8s %pF\n",
3172 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003173 }
3174
3175 return 0;
3176}
3177
3178static const struct seq_operations ptype_seq_ops = {
3179 .start = ptype_seq_start,
3180 .next = ptype_seq_next,
3181 .stop = ptype_seq_stop,
3182 .show = ptype_seq_show,
3183};
3184
3185static int ptype_seq_open(struct inode *inode, struct file *file)
3186{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003187 return seq_open_net(inode, file, &ptype_seq_ops,
3188 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003189}
3190
3191static const struct file_operations ptype_seq_fops = {
3192 .owner = THIS_MODULE,
3193 .open = ptype_seq_open,
3194 .read = seq_read,
3195 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003196 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003197};
3198
3199
Pavel Emelyanov46650792007-10-08 20:38:39 -07003200static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201{
3202 int rc = -ENOMEM;
3203
Eric W. Biederman881d9662007-09-17 11:56:21 -07003204 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003206 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003208 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003209 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003210
Eric W. Biederman881d9662007-09-17 11:56:21 -07003211 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003212 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213 rc = 0;
3214out:
3215 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003216out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003217 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003219 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003221 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003222 goto out;
3223}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003224
Pavel Emelyanov46650792007-10-08 20:38:39 -07003225static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003226{
3227 wext_proc_exit(net);
3228
3229 proc_net_remove(net, "ptype");
3230 proc_net_remove(net, "softnet_stat");
3231 proc_net_remove(net, "dev");
3232}
3233
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003234static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003235 .init = dev_proc_net_init,
3236 .exit = dev_proc_net_exit,
3237};
3238
3239static int __init dev_proc_init(void)
3240{
3241 return register_pernet_subsys(&dev_proc_ops);
3242}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243#else
3244#define dev_proc_init() 0
3245#endif /* CONFIG_PROC_FS */
3246
3247
3248/**
3249 * netdev_set_master - set up master/slave pair
3250 * @slave: slave device
3251 * @master: new master device
3252 *
3253 * Changes the master device of the slave. Pass %NULL to break the
3254 * bonding. The caller must hold the RTNL semaphore. On a failure
3255 * a negative errno code is returned. On success the reference counts
3256 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3257 * function returns zero.
3258 */
3259int netdev_set_master(struct net_device *slave, struct net_device *master)
3260{
3261 struct net_device *old = slave->master;
3262
3263 ASSERT_RTNL();
3264
3265 if (master) {
3266 if (old)
3267 return -EBUSY;
3268 dev_hold(master);
3269 }
3270
3271 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003272
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273 synchronize_net();
3274
3275 if (old)
3276 dev_put(old);
3277
3278 if (master)
3279 slave->flags |= IFF_SLAVE;
3280 else
3281 slave->flags &= ~IFF_SLAVE;
3282
3283 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3284 return 0;
3285}
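
/*
 * Example (editorial sketch): the bonding driver pairs these calls when
 * enslaving and releasing a device (error handling omitted):
 *
 *	err = netdev_set_master(slave_dev, bond_dev);
 *	...
 *	netdev_set_master(slave_dev, NULL);
 *
 * The first call enslaves slave_dev to bond_dev; the second breaks the
 * pairing again.
 */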
3286
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003287static void dev_change_rx_flags(struct net_device *dev, int flags)
3288{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003289 const struct net_device_ops *ops = dev->netdev_ops;
3290
3291 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3292 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003293}
3294
Wang Chendad9b332008-06-18 01:48:28 -07003295static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003296{
3297 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003298 uid_t uid;
3299 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003300
Patrick McHardy24023452007-07-14 18:51:31 -07003301 ASSERT_RTNL();
3302
Wang Chendad9b332008-06-18 01:48:28 -07003303 dev->flags |= IFF_PROMISC;
3304 dev->promiscuity += inc;
3305 if (dev->promiscuity == 0) {
3306 /*
3307 * Avoid overflow.
3308 * If inc causes overflow, untouch promisc and return error.
3309		 * If inc causes overflow, leave promiscuity unchanged and return an error.
3310 if (inc < 0)
3311 dev->flags &= ~IFF_PROMISC;
3312 else {
3313 dev->promiscuity -= inc;
3314			printk(KERN_WARNING "%s: promiscuity counter would overflow, "
3315			       "promiscuity left unchanged; the device's promiscuity "
3316			       "feature may be broken.\n", dev->name);
3317 return -EOVERFLOW;
3318 }
3319 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003320 if (dev->flags != old_flags) {
3321 printk(KERN_INFO "device %s %s promiscuous mode\n",
3322 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3323 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003324 if (audit_enabled) {
3325 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003326 audit_log(current->audit_context, GFP_ATOMIC,
3327 AUDIT_ANOM_PROMISCUOUS,
3328 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3329 dev->name, (dev->flags & IFF_PROMISC),
3330 (old_flags & IFF_PROMISC),
3331 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003332 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003333 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003334 }
Patrick McHardy24023452007-07-14 18:51:31 -07003335
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003336 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003337 }
Wang Chendad9b332008-06-18 01:48:28 -07003338 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003339}
3340
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341/**
3342 * dev_set_promiscuity - update promiscuity count on a device
3343 * @dev: device
3344 * @inc: modifier
3345 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003346 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347 * remains above zero the interface remains promiscuous. Once it hits zero
3348 * the device reverts back to normal filtering operation. A negative inc
3349 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003350 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 */
Wang Chendad9b332008-06-18 01:48:28 -07003352int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353{
3354 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003355 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356
Wang Chendad9b332008-06-18 01:48:28 -07003357 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003358 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003359 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003360 if (dev->flags != old_flags)
3361 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003362 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363}
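
/*
 * Example (editorial sketch): a packet-tap style caller balances the
 * reference count with paired +1/-1 calls, under the RTNL:
 *
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */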
3364
3365/**
3366 * dev_set_allmulti - update allmulti count on a device
3367 * @dev: device
3368 * @inc: modifier
3369 *
3370 * Add or remove reception of all multicast frames to a device. While the
3371 * count in the device remains above zero the interface remains listening
3372 * to all multicast frames. Once it hits zero the device reverts back to normal
3373 * filtering operation. A negative @inc value is used to drop the counter
3374 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003375 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 */
3377
Wang Chendad9b332008-06-18 01:48:28 -07003378int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379{
3380 unsigned short old_flags = dev->flags;
3381
Patrick McHardy24023452007-07-14 18:51:31 -07003382 ASSERT_RTNL();
3383
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003385 dev->allmulti += inc;
3386 if (dev->allmulti == 0) {
3387 /*
3388 * Avoid overflow.
3389		 * If inc causes overflow, leave allmulti unchanged and return an error.
3390 */
3391 if (inc < 0)
3392 dev->flags &= ~IFF_ALLMULTI;
3393 else {
3394 dev->allmulti -= inc;
3395			printk(KERN_WARNING "%s: allmulti counter would overflow, "
3396			       "allmulti left unchanged; the device's allmulti "
3397			       "feature may be broken.\n", dev->name);
3398 return -EOVERFLOW;
3399 }
3400 }
Patrick McHardy24023452007-07-14 18:51:31 -07003401 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003402 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003403 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003404 }
Wang Chendad9b332008-06-18 01:48:28 -07003405 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003406}
3407
3408/*
3409 * Upload unicast and multicast address lists to device and
3410 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003411 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003412 * are present.
3413 */
3414void __dev_set_rx_mode(struct net_device *dev)
3415{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003416 const struct net_device_ops *ops = dev->netdev_ops;
3417
Patrick McHardy4417da62007-06-27 01:28:10 -07003418 /* dev_open will call this function so the list will stay sane. */
3419 if (!(dev->flags&IFF_UP))
3420 return;
3421
3422 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003423 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003424
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003425 if (ops->ndo_set_rx_mode)
3426 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003427 else {
3428		/* Unicast address changes may only happen under the rtnl,
3429 * therefore calling __dev_set_promiscuity here is safe.
3430 */
3431 if (dev->uc_count > 0 && !dev->uc_promisc) {
3432 __dev_set_promiscuity(dev, 1);
3433 dev->uc_promisc = 1;
3434 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3435 __dev_set_promiscuity(dev, -1);
3436 dev->uc_promisc = 0;
3437 }
3438
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003439 if (ops->ndo_set_multicast_list)
3440 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003441 }
3442}
3443
3444void dev_set_rx_mode(struct net_device *dev)
3445{
David S. Millerb9e40852008-07-15 00:15:08 -07003446 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003447 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003448 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449}
3450
Jiri Pirkof001fde2009-05-05 02:48:28 +00003451/* hw addresses list handling functions */
3452
3453static int __hw_addr_add(struct list_head *list, unsigned char *addr,
3454 int addr_len, unsigned char addr_type)
3455{
3456 struct netdev_hw_addr *ha;
3457 int alloc_size;
3458
3459 if (addr_len > MAX_ADDR_LEN)
3460 return -EINVAL;
3461
3462 alloc_size = sizeof(*ha);
3463 if (alloc_size < L1_CACHE_BYTES)
3464 alloc_size = L1_CACHE_BYTES;
3465 ha = kmalloc(alloc_size, GFP_ATOMIC);
3466 if (!ha)
3467 return -ENOMEM;
3468 memcpy(ha->addr, addr, addr_len);
3469 ha->type = addr_type;
3470 list_add_tail_rcu(&ha->list, list);
3471 return 0;
3472}
3473
3474static void ha_rcu_free(struct rcu_head *head)
3475{
3476 struct netdev_hw_addr *ha;
3477
3478 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3479 kfree(ha);
3480}
3481
3482static int __hw_addr_del_ii(struct list_head *list, unsigned char *addr,
3483 int addr_len, unsigned char addr_type,
3484 int ignore_index)
3485{
3486 struct netdev_hw_addr *ha;
3487 int i = 0;
3488
3489 list_for_each_entry(ha, list, list) {
3490 if (i++ != ignore_index &&
3491 !memcmp(ha->addr, addr, addr_len) &&
3492 (ha->type == addr_type || !addr_type)) {
3493 list_del_rcu(&ha->list);
3494 call_rcu(&ha->rcu_head, ha_rcu_free);
3495 return 0;
3496 }
3497 }
3498 return -ENOENT;
3499}
3500
3501static int __hw_addr_add_multiple_ii(struct list_head *to_list,
3502 struct list_head *from_list,
3503 int addr_len, unsigned char addr_type,
3504 int ignore_index)
3505{
3506 int err;
3507 struct netdev_hw_addr *ha, *ha2;
3508 unsigned char type;
3509
3510 list_for_each_entry(ha, from_list, list) {
3511 type = addr_type ? addr_type : ha->type;
3512 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
3513 if (err)
3514 goto unroll;
3515 }
3516 return 0;
3517
3518unroll:
3519 list_for_each_entry(ha2, from_list, list) {
3520 if (ha2 == ha)
3521 break;
3522 type = addr_type ? addr_type : ha2->type;
3523 __hw_addr_del_ii(to_list, ha2->addr, addr_len, type,
3524 ignore_index);
3525 }
3526 return err;
3527}
3528
3529static void __hw_addr_del_multiple_ii(struct list_head *to_list,
3530 struct list_head *from_list,
3531 int addr_len, unsigned char addr_type,
3532 int ignore_index)
3533{
3534 struct netdev_hw_addr *ha;
3535 unsigned char type;
3536
3537 list_for_each_entry(ha, from_list, list) {
3538 type = addr_type ? addr_type : ha->type;
3539		__hw_addr_del_ii(to_list, ha->addr, addr_len, type,
3540 ignore_index);
3541 }
3542}
3543
3544static void __hw_addr_flush(struct list_head *list)
3545{
3546 struct netdev_hw_addr *ha, *tmp;
3547
3548 list_for_each_entry_safe(ha, tmp, list, list) {
3549 list_del_rcu(&ha->list);
3550 call_rcu(&ha->rcu_head, ha_rcu_free);
3551 }
3552}
3553
3554/* Device addresses handling functions */
3555
3556static void dev_addr_flush(struct net_device *dev)
3557{
3558 /* rtnl_mutex must be held here */
3559
3560 __hw_addr_flush(&dev->dev_addr_list);
3561 dev->dev_addr = NULL;
3562}
3563
3564static int dev_addr_init(struct net_device *dev)
3565{
3566 unsigned char addr[MAX_ADDR_LEN];
3567 struct netdev_hw_addr *ha;
3568 int err;
3569
3570 /* rtnl_mutex must be held here */
3571
3572 INIT_LIST_HEAD(&dev->dev_addr_list);
3573	memset(addr, 0, sizeof(addr));
3574	err = __hw_addr_add(&dev->dev_addr_list, addr, sizeof(addr),
3575 NETDEV_HW_ADDR_T_LAN);
3576 if (!err) {
3577 /*
3578 * Get the first (previously created) address from the list
3579 * and set dev_addr pointer to this location.
3580 */
3581 ha = list_first_entry(&dev->dev_addr_list,
3582 struct netdev_hw_addr, list);
3583 dev->dev_addr = ha->addr;
3584 }
3585 return err;
3586}
3587
3588/**
3589 * dev_addr_add - Add a device address
3590 * @dev: device
3591 * @addr: address to add
3592 * @addr_type: address type
3593 *
3594 * Add a device address to the device or increase the reference count if
3595 * it already exists.
3596 *
3597 * The caller must hold the rtnl_mutex.
3598 */
3599int dev_addr_add(struct net_device *dev, unsigned char *addr,
3600 unsigned char addr_type)
3601{
3602 int err;
3603
3604 ASSERT_RTNL();
3605
3606 err = __hw_addr_add(&dev->dev_addr_list, addr, dev->addr_len,
3607 addr_type);
3608 if (!err)
3609 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3610 return err;
3611}
3612EXPORT_SYMBOL(dev_addr_add);
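
/*
 * Example (editorial sketch): adding a secondary hardware address under
 * the rtnl_mutex; "san_mac" is a hypothetical buffer of dev->addr_len
 * bytes:
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, san_mac, NETDEV_HW_ADDR_T_SAN);
 *	rtnl_unlock();
 */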
3613
3614/**
3615 * dev_addr_del - Release a device address.
3616 * @dev: device
3617 * @addr: address to delete
3618 * @addr_type: address type
3619 *
3620 * Release reference to a device address and remove it from the device
3621 * if the reference count drops to zero.
3622 *
3623 * The caller must hold the rtnl_mutex.
3624 */
3625int dev_addr_del(struct net_device *dev, unsigned char *addr,
3626 unsigned char addr_type)
3627{
3628 int err;
3629
3630 ASSERT_RTNL();
3631
3632 err = __hw_addr_del_ii(&dev->dev_addr_list, addr, dev->addr_len,
3633 addr_type, 0);
3634 if (!err)
3635 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3636 return err;
3637}
3638EXPORT_SYMBOL(dev_addr_del);
3639
3640/**
3641 * dev_addr_add_multiple - Add device addresses from another device
3642 * @to_dev: device to which addresses will be added
3643 * @from_dev: device from which addresses will be added
3644 * @addr_type: address type - 0 means type will be used from from_dev
3645 *
3646 * Add the device addresses of one device to another.
3647 *
3648 * The caller must hold the rtnl_mutex.
3649 */
3650int dev_addr_add_multiple(struct net_device *to_dev,
3651 struct net_device *from_dev,
3652 unsigned char addr_type)
3653{
3654 int err;
3655
3656 ASSERT_RTNL();
3657
3658 if (from_dev->addr_len != to_dev->addr_len)
3659 return -EINVAL;
3660 err = __hw_addr_add_multiple_ii(&to_dev->dev_addr_list,
3661 &from_dev->dev_addr_list,
3662 to_dev->addr_len, addr_type, 0);
3663 if (!err)
3664 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3665 return err;
3666}
3667EXPORT_SYMBOL(dev_addr_add_multiple);
3668
3669/**
3670 * dev_addr_del_multiple - Delete device addresses by another device
3671 * @to_dev: device where the addresses will be deleted
3672 * @from_dev: device supplying the list of addresses to delete
3673 * @addr_type: address type - 0 means type will be used from from_dev
3674 *
3675 * Deletes from @to_dev the addresses listed in @from_dev.
3676 *
3677 * The caller must hold the rtnl_mutex.
3678 */
3679int dev_addr_del_multiple(struct net_device *to_dev,
3680 struct net_device *from_dev,
3681 unsigned char addr_type)
3682{
3683 ASSERT_RTNL();
3684
3685 if (from_dev->addr_len != to_dev->addr_len)
3686 return -EINVAL;
3687 __hw_addr_del_multiple_ii(&to_dev->dev_addr_list,
3688 &from_dev->dev_addr_list,
3689 to_dev->addr_len, addr_type, 0);
3690 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3691 return 0;
3692}
3693EXPORT_SYMBOL(dev_addr_del_multiple);
3694
3695/* unicast and multicast addresses handling functions */
3696
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003697int __dev_addr_delete(struct dev_addr_list **list, int *count,
3698 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003699{
3700 struct dev_addr_list *da;
3701
3702 for (; (da = *list) != NULL; list = &da->next) {
3703 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3704 alen == da->da_addrlen) {
3705 if (glbl) {
3706 int old_glbl = da->da_gusers;
3707 da->da_gusers = 0;
3708 if (old_glbl == 0)
3709 break;
3710 }
3711 if (--da->da_users)
3712 return 0;
3713
3714 *list = da->next;
3715 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003716 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003717 return 0;
3718 }
3719 }
3720 return -ENOENT;
3721}
3722
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003723int __dev_addr_add(struct dev_addr_list **list, int *count,
3724 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003725{
3726 struct dev_addr_list *da;
3727
3728 for (da = *list; da != NULL; da = da->next) {
3729 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3730 da->da_addrlen == alen) {
3731 if (glbl) {
3732 int old_glbl = da->da_gusers;
3733 da->da_gusers = 1;
3734 if (old_glbl)
3735 return 0;
3736 }
3737 da->da_users++;
3738 return 0;
3739 }
3740 }
3741
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003742 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003743 if (da == NULL)
3744 return -ENOMEM;
3745 memcpy(da->da_addr, addr, alen);
3746 da->da_addrlen = alen;
3747 da->da_users = 1;
3748 da->da_gusers = glbl ? 1 : 0;
3749 da->next = *list;
3750 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003751 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003752 return 0;
3753}
3754
Patrick McHardy4417da62007-06-27 01:28:10 -07003755/**
3756 * dev_unicast_delete - Release secondary unicast address.
3757 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003758 * @addr: address to delete
3759 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003760 *
3761 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003762 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003763 *
3764 * The caller must hold the rtnl_mutex.
3765 */
3766int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3767{
3768 int err;
3769
3770 ASSERT_RTNL();
3771
David S. Millerb9e40852008-07-15 00:15:08 -07003772 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003773 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3774 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003775 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003776 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003777 return err;
3778}
3779EXPORT_SYMBOL(dev_unicast_delete);
3780
3781/**
3782 * dev_unicast_add - add a secondary unicast address
3783 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003784 * @addr: address to add
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003785 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003786 *
3787 * Add a secondary unicast address to the device or increase
3788 * the reference count if it already exists.
3789 *
3790 * The caller must hold the rtnl_mutex.
3791 */
3792int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3793{
3794 int err;
3795
3796 ASSERT_RTNL();
3797
David S. Millerb9e40852008-07-15 00:15:08 -07003798 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003799 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3800 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003801 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003802 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003803 return err;
3804}
3805EXPORT_SYMBOL(dev_unicast_add);
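
/*
 * Example (editorial sketch): a stacked device (a macvlan-like driver,
 * say) installs its own MAC as a secondary unicast address of the lower
 * device, so frames for it pass the lower device's filter:
 *
 *	rtnl_lock();
 *	err = dev_unicast_add(lowerdev, dev->dev_addr, ETH_ALEN);
 *	rtnl_unlock();
 */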
3806
Chris Leeche83a2ea2008-01-31 16:53:23 -08003807int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3808 struct dev_addr_list **from, int *from_count)
3809{
3810 struct dev_addr_list *da, *next;
3811 int err = 0;
3812
3813 da = *from;
3814 while (da != NULL) {
3815 next = da->next;
3816 if (!da->da_synced) {
3817 err = __dev_addr_add(to, to_count,
3818 da->da_addr, da->da_addrlen, 0);
3819 if (err < 0)
3820 break;
3821 da->da_synced = 1;
3822 da->da_users++;
3823 } else if (da->da_users == 1) {
3824 __dev_addr_delete(to, to_count,
3825 da->da_addr, da->da_addrlen, 0);
3826 __dev_addr_delete(from, from_count,
3827 da->da_addr, da->da_addrlen, 0);
3828 }
3829 da = next;
3830 }
3831 return err;
3832}
3833
3834void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3835 struct dev_addr_list **from, int *from_count)
3836{
3837 struct dev_addr_list *da, *next;
3838
3839 da = *from;
3840 while (da != NULL) {
3841 next = da->next;
3842 if (da->da_synced) {
3843 __dev_addr_delete(to, to_count,
3844 da->da_addr, da->da_addrlen, 0);
3845 da->da_synced = 0;
3846 __dev_addr_delete(from, from_count,
3847 da->da_addr, da->da_addrlen, 0);
3848 }
3849 da = next;
3850 }
3851}
3852
3853/**
3854 * dev_unicast_sync - Synchronize device's unicast list to another device
3855 * @to: destination device
3856 * @from: source device
3857 *
3858 * Add newly added addresses to the destination device and release
3859 * addresses that have no users left. The source device must be
3860 * locked by netif_addr_lock_bh.
3861 *
3862 * This function is intended to be called from the dev->set_rx_mode
3863 * function of layered software devices.
3864 */
3865int dev_unicast_sync(struct net_device *to, struct net_device *from)
3866{
3867 int err = 0;
3868
David S. Millerb9e40852008-07-15 00:15:08 -07003869 netif_addr_lock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003870 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3871 &from->uc_list, &from->uc_count);
3872 if (!err)
3873 __dev_set_rx_mode(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003874 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003875 return err;
3876}
3877EXPORT_SYMBOL(dev_unicast_sync);
3878
3879/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003880 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003881 * @to: destination device
3882 * @from: source device
3883 *
3884 * Remove all addresses that were added to the destination device by
3885 * dev_unicast_sync(). This function is intended to be called from the
3886 * dev->stop function of layered software devices.
3887 */
3888void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3889{
David S. Millerb9e40852008-07-15 00:15:08 -07003890 netif_addr_lock_bh(from);
David S. Millere308a5d2008-07-15 00:13:44 -07003891 netif_addr_lock(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003892
3893 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3894 &from->uc_list, &from->uc_count);
3895 __dev_set_rx_mode(to);
3896
David S. Millere308a5d2008-07-15 00:13:44 -07003897 netif_addr_unlock(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003898 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003899}
3900EXPORT_SYMBOL(dev_unicast_unsync);
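
/*
 * Example (editorial sketch): how a layered device would pair the two
 * calls, syncing from its rx_mode handler and unsyncing on stop; the
 * "foo" names are hypothetical:
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		dev_unicast_sync(foo_lower_dev(dev), dev);
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		dev_unicast_unsync(foo_lower_dev(dev), dev);
 *		return 0;
 *	}
 */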
3901
Denis Cheng12972622007-07-18 02:12:56 -07003902static void __dev_addr_discard(struct dev_addr_list **list)
3903{
3904 struct dev_addr_list *tmp;
3905
3906 while (*list != NULL) {
3907 tmp = *list;
3908 *list = tmp->next;
3909 if (tmp->da_users > tmp->da_gusers)
3910			printk(KERN_ERR "__dev_addr_discard: address leakage! "
3911 "da_users=%d\n", tmp->da_users);
3912 kfree(tmp);
3913 }
3914}
3915
Denis Cheng26cc2522007-07-18 02:12:03 -07003916static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07003917{
David S. Millerb9e40852008-07-15 00:15:08 -07003918 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07003919
Patrick McHardy4417da62007-06-27 01:28:10 -07003920 __dev_addr_discard(&dev->uc_list);
3921 dev->uc_count = 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003922
Denis Cheng456ad752007-07-18 02:10:54 -07003923 __dev_addr_discard(&dev->mc_list);
3924 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07003925
David S. Millerb9e40852008-07-15 00:15:08 -07003926 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07003927}
3928
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07003929/**
3930 * dev_get_flags - get flags reported to userspace
3931 * @dev: device
3932 *
3933 * Get the combination of flag bits exported through APIs to userspace.
3934 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935unsigned dev_get_flags(const struct net_device *dev)
3936{
3937 unsigned flags;
3938
3939 flags = (dev->flags & ~(IFF_PROMISC |
3940 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08003941 IFF_RUNNING |
3942 IFF_LOWER_UP |
3943 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944 (dev->gflags & (IFF_PROMISC |
3945 IFF_ALLMULTI));
3946
Stefan Rompfb00055a2006-03-20 17:09:11 -08003947 if (netif_running(dev)) {
3948 if (netif_oper_up(dev))
3949 flags |= IFF_RUNNING;
3950 if (netif_carrier_ok(dev))
3951 flags |= IFF_LOWER_UP;
3952 if (netif_dormant(dev))
3953 flags |= IFF_DORMANT;
3954 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955
3956 return flags;
3957}
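
/*
 * Example (editorial sketch): the composed flags are what userspace
 * reads back via SIOCGIFFLAGS:
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(sock, SIOCGIFFLAGS, &ifr) == 0 &&
 *	    (ifr.ifr_flags & IFF_RUNNING))
 *		printf("%s is operationally up\n", ifr.ifr_name);
 */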
3958
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07003959/**
3960 * dev_change_flags - change device settings
3961 * @dev: device
3962 * @flags: device state flags
3963 *
3964 * Change settings on device based state flags. The flags are
3965 * in the userspace exported format.
3966 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003967int dev_change_flags(struct net_device *dev, unsigned flags)
3968{
Thomas Graf7c355f52007-06-05 16:03:03 -07003969 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970 int old_flags = dev->flags;
3971
Patrick McHardy24023452007-07-14 18:51:31 -07003972 ASSERT_RTNL();
3973
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974 /*
3975 * Set the flags on our device.
3976 */
3977
3978 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3979 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3980 IFF_AUTOMEDIA)) |
3981 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3982 IFF_ALLMULTI));
3983
3984 /*
3985 * Load in the correct multicast list now the flags have changed.
3986 */
3987
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003988 if ((old_flags ^ flags) & IFF_MULTICAST)
3989 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07003990
Patrick McHardy4417da62007-06-27 01:28:10 -07003991 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992
3993 /*
3994	 * Have we downed the interface? We handle IFF_UP ourselves
3995 * according to user attempts to set it, rather than blindly
3996 * setting it.
3997 */
3998
3999 ret = 0;
4000 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4001 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4002
4003 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004004 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005 }
4006
4007 if (dev->flags & IFF_UP &&
4008 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4009 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004010 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004011
4012 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4013 int inc = (flags & IFF_PROMISC) ? +1 : -1;
4014 dev->gflags ^= IFF_PROMISC;
4015 dev_set_promiscuity(dev, inc);
4016 }
4017
4018 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4019	   is important. Some (broken) drivers set IFF_PROMISC when
4020	   IFF_ALLMULTI is requested, without asking us and without reporting it.
4021 */
4022 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4023 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
4024 dev->gflags ^= IFF_ALLMULTI;
4025 dev_set_allmulti(dev, inc);
4026 }
4027
Thomas Graf7c355f52007-06-05 16:03:03 -07004028 /* Exclude state transition flags, already notified */
4029 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4030 if (changes)
4031 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032
4033 return ret;
4034}
4035
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004036/**
4037 * dev_set_mtu - Change maximum transfer unit
4038 * @dev: device
4039 * @new_mtu: new transfer unit
4040 *
4041 * Change the maximum transfer size of the network device.
4042 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043int dev_set_mtu(struct net_device *dev, int new_mtu)
4044{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004045 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046 int err;
4047
4048 if (new_mtu == dev->mtu)
4049 return 0;
4050
4051 /* MTU must be positive. */
4052 if (new_mtu < 0)
4053 return -EINVAL;
4054
4055 if (!netif_device_present(dev))
4056 return -ENODEV;
4057
4058 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004059 if (ops->ndo_change_mtu)
4060 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061 else
4062 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004063
Linus Torvalds1da177e2005-04-16 15:20:36 -07004064 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004065 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066 return err;
4067}
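
/*
 * Example (editorial sketch): in-kernel callers hold the RTNL, e.g. a
 * tunnel device sizing itself below its underlay ("overhead" is a
 * hypothetical per-tunnel constant):
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(tunnel_dev, lower_dev->mtu - overhead);
 *	rtnl_unlock();
 */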
4068
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004069/**
4070 * dev_set_mac_address - Change Media Access Control Address
4071 * @dev: device
4072 * @sa: new address
4073 *
4074 * Change the hardware (MAC) address of the device
4075 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4077{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004078 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079 int err;
4080
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004081 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004082 return -EOPNOTSUPP;
4083 if (sa->sa_family != dev->type)
4084 return -EINVAL;
4085 if (!netif_device_present(dev))
4086 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004087 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004089 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090 return err;
4091}
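
/*
 * Example (editorial sketch): note the sa_family check above; a caller
 * fills it in with the device's ARP hardware type:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */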
4092
4093/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07004094 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004096static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004097{
4098 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004099 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100
4101 if (!dev)
4102 return -ENODEV;
4103
4104 switch (cmd) {
4105 case SIOCGIFFLAGS: /* Get interface flags */
4106 ifr->ifr_flags = dev_get_flags(dev);
4107 return 0;
4108
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109 case SIOCGIFMETRIC: /* Get the metric on the interface
4110 (currently unused) */
4111 ifr->ifr_metric = 0;
4112 return 0;
4113
Linus Torvalds1da177e2005-04-16 15:20:36 -07004114 case SIOCGIFMTU: /* Get the MTU of a device */
4115 ifr->ifr_mtu = dev->mtu;
4116 return 0;
4117
Linus Torvalds1da177e2005-04-16 15:20:36 -07004118 case SIOCGIFHWADDR:
4119 if (!dev->addr_len)
4120 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4121 else
4122 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4123 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4124 ifr->ifr_hwaddr.sa_family = dev->type;
4125 return 0;
4126
Jeff Garzik14e3e072007-10-08 00:06:32 -07004127 case SIOCGIFSLAVE:
4128 err = -EINVAL;
4129 break;
4130
4131 case SIOCGIFMAP:
4132 ifr->ifr_map.mem_start = dev->mem_start;
4133 ifr->ifr_map.mem_end = dev->mem_end;
4134 ifr->ifr_map.base_addr = dev->base_addr;
4135 ifr->ifr_map.irq = dev->irq;
4136 ifr->ifr_map.dma = dev->dma;
4137 ifr->ifr_map.port = dev->if_port;
4138 return 0;
4139
4140 case SIOCGIFINDEX:
4141 ifr->ifr_ifindex = dev->ifindex;
4142 return 0;
4143
4144 case SIOCGIFTXQLEN:
4145 ifr->ifr_qlen = dev->tx_queue_len;
4146 return 0;
4147
4148 default:
4149 /* dev_ioctl() should ensure this case
4150 * is never reached
4151 */
4152 WARN_ON(1);
4153 err = -EINVAL;
4154 break;
4155
4156 }
4157 return err;
4158}
4159
4160/*
4161 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4162 */
4163static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4164{
4165 int err;
4166 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004167 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004168
4169 if (!dev)
4170 return -ENODEV;
4171
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004172 ops = dev->netdev_ops;
4173
Jeff Garzik14e3e072007-10-08 00:06:32 -07004174 switch (cmd) {
4175 case SIOCSIFFLAGS: /* Set interface flags */
4176 return dev_change_flags(dev, ifr->ifr_flags);
4177
4178 case SIOCSIFMETRIC: /* Set the metric on the interface
4179 (currently unused) */
4180 return -EOPNOTSUPP;
4181
4182 case SIOCSIFMTU: /* Set the MTU of a device */
4183 return dev_set_mtu(dev, ifr->ifr_mtu);
4184
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 case SIOCSIFHWADDR:
4186 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4187
4188 case SIOCSIFHWBROADCAST:
4189 if (ifr->ifr_hwaddr.sa_family != dev->type)
4190 return -EINVAL;
4191 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4192 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004193 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 return 0;
4195
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 case SIOCSIFMAP:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004197 if (ops->ndo_set_config) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198 if (!netif_device_present(dev))
4199 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004200 return ops->ndo_set_config(dev, &ifr->ifr_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004201 }
4202 return -EOPNOTSUPP;
4203
4204 case SIOCADDMULTI:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004205 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004206 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4207 return -EINVAL;
4208 if (!netif_device_present(dev))
4209 return -ENODEV;
4210 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4211 dev->addr_len, 1);
4212
4213 case SIOCDELMULTI:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004214 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4216 return -EINVAL;
4217 if (!netif_device_present(dev))
4218 return -ENODEV;
4219 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4220 dev->addr_len, 1);
4221
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222 case SIOCSIFTXQLEN:
4223 if (ifr->ifr_qlen < 0)
4224 return -EINVAL;
4225 dev->tx_queue_len = ifr->ifr_qlen;
4226 return 0;
4227
4228 case SIOCSIFNAME:
4229 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4230 return dev_change_name(dev, ifr->ifr_newname);
4231
4232 /*
4233 * Unknown or private ioctl
4234 */
4235
4236 default:
4237 if ((cmd >= SIOCDEVPRIVATE &&
4238 cmd <= SIOCDEVPRIVATE + 15) ||
4239 cmd == SIOCBONDENSLAVE ||
4240 cmd == SIOCBONDRELEASE ||
4241 cmd == SIOCBONDSETHWADDR ||
4242 cmd == SIOCBONDSLAVEINFOQUERY ||
4243 cmd == SIOCBONDINFOQUERY ||
4244 cmd == SIOCBONDCHANGEACTIVE ||
4245 cmd == SIOCGMIIPHY ||
4246 cmd == SIOCGMIIREG ||
4247 cmd == SIOCSMIIREG ||
4248 cmd == SIOCBRADDIF ||
4249 cmd == SIOCBRDELIF ||
Patrick Ohlyd24fff22009-02-12 05:03:40 +00004250 cmd == SIOCSHWTSTAMP ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251 cmd == SIOCWANDEV) {
4252 err = -EOPNOTSUPP;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004253 if (ops->ndo_do_ioctl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254 if (netif_device_present(dev))
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004255 err = ops->ndo_do_ioctl(dev, ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256 else
4257 err = -ENODEV;
4258 }
4259 } else
4260 err = -EINVAL;
4261
4262 }
4263 return err;
4264}
4265
4266/*
4267 * This function handles all "interface"-type I/O control requests. The actual
4268 * 'doing' part of this is dev_ifsioc above.
4269 */
4270
4271/**
4272 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004273 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274 * @cmd: command to issue
4275 * @arg: pointer to a struct ifreq in user space
4276 *
4277 * Issue ioctl functions to devices. This is normally called by the
4278 * user space syscall interfaces but can sometimes be useful for
4279 * other purposes. The return value is the return from the syscall if
4280 * positive or a negative errno code on error.
4281 */
4282
Eric W. Biederman881d9662007-09-17 11:56:21 -07004283int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284{
4285 struct ifreq ifr;
4286 int ret;
4287 char *colon;
4288
4289 /* One special case: SIOCGIFCONF takes ifconf argument
4290 and requires shared lock, because it sleeps writing
4291 to user space.
4292 */
4293
4294 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004295 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004296 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004297 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298 return ret;
4299 }
4300 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004301 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302
4303 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4304 return -EFAULT;
4305
4306 ifr.ifr_name[IFNAMSIZ-1] = 0;
4307
4308 colon = strchr(ifr.ifr_name, ':');
4309 if (colon)
4310 *colon = 0;
4311
4312 /*
4313 * See which interface the caller is talking about.
4314 */
4315
4316 switch (cmd) {
4317 /*
4318 * These ioctl calls:
4319 * - can be done by all.
4320 * - atomic and do not require locking.
4321 * - return a value
4322 */
4323 case SIOCGIFFLAGS:
4324 case SIOCGIFMETRIC:
4325 case SIOCGIFMTU:
4326 case SIOCGIFHWADDR:
4327 case SIOCGIFSLAVE:
4328 case SIOCGIFMAP:
4329 case SIOCGIFINDEX:
4330 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004331 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332 read_lock(&dev_base_lock);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004333 ret = dev_ifsioc_locked(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334 read_unlock(&dev_base_lock);
4335 if (!ret) {
4336 if (colon)
4337 *colon = ':';
4338 if (copy_to_user(arg, &ifr,
4339 sizeof(struct ifreq)))
4340 ret = -EFAULT;
4341 }
4342 return ret;
4343
4344 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004345 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004346 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004347 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348 rtnl_unlock();
4349 if (!ret) {
4350 if (colon)
4351 *colon = ':';
4352 if (copy_to_user(arg, &ifr,
4353 sizeof(struct ifreq)))
4354 ret = -EFAULT;
4355 }
4356 return ret;
4357
4358 /*
4359 * These ioctl calls:
4360 * - require superuser power.
4361 * - require strict serialization.
4362 * - return a value
4363 */
4364 case SIOCGMIIPHY:
4365 case SIOCGMIIREG:
4366 case SIOCSIFNAME:
4367 if (!capable(CAP_NET_ADMIN))
4368 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004369 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004370 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004371 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004372 rtnl_unlock();
4373 if (!ret) {
4374 if (colon)
4375 *colon = ':';
4376 if (copy_to_user(arg, &ifr,
4377 sizeof(struct ifreq)))
4378 ret = -EFAULT;
4379 }
4380 return ret;
4381
4382 /*
4383 * These ioctl calls:
4384 * - require superuser power.
4385 * - require strict serialization.
4386 * - do not return a value
4387 */
4388 case SIOCSIFFLAGS:
4389 case SIOCSIFMETRIC:
4390 case SIOCSIFMTU:
4391 case SIOCSIFMAP:
4392 case SIOCSIFHWADDR:
4393 case SIOCSIFSLAVE:
4394 case SIOCADDMULTI:
4395 case SIOCDELMULTI:
4396 case SIOCSIFHWBROADCAST:
4397 case SIOCSIFTXQLEN:
4398 case SIOCSMIIREG:
4399 case SIOCBONDENSLAVE:
4400 case SIOCBONDRELEASE:
4401 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004402 case SIOCBONDCHANGEACTIVE:
4403 case SIOCBRADDIF:
4404 case SIOCBRDELIF:
Patrick Ohlyd24fff22009-02-12 05:03:40 +00004405 case SIOCSHWTSTAMP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406 if (!capable(CAP_NET_ADMIN))
4407 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08004408 /* fall through */
4409 case SIOCBONDSLAVEINFOQUERY:
4410 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004411 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004413 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414 rtnl_unlock();
4415 return ret;
4416
4417 case SIOCGIFMEM:
4418 /* Get the per device memory space. We can add this but
4419 * currently do not support it */
4420 case SIOCSIFMEM:
4421 /* Set the per device memory buffer space.
4422 * Not applicable in our case */
4423 case SIOCSIFLINK:
4424 return -EINVAL;
4425
4426 /*
4427 * Unknown or private ioctl.
4428 */
4429 default:
4430 if (cmd == SIOCWANDEV ||
4431 (cmd >= SIOCDEVPRIVATE &&
4432 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004433 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004434 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004435 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436 rtnl_unlock();
4437 if (!ret && copy_to_user(arg, &ifr,
4438 sizeof(struct ifreq)))
4439 ret = -EFAULT;
4440 return ret;
4441 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07004443 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004444 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445 return -EINVAL;
4446 }
4447}
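/*
 * Example: what the requests routed through dev_ioctl() look like from
 * user space. A minimal sketch, guarded out of the build; "eth0" is an
 * assumed interface name and any AF_INET datagram socket works as the
 * ioctl handle.
 */
#if 0	/* user-space illustration only */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	/* SIOCGIFFLAGS is served under dev_base_lock by dev_ifsioc_locked() */
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
		printf("eth0 flags: 0x%x\n", ifr.ifr_flags);
	close(fd);
	return 0;
}
#endif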
4448
4449
4450/**
4451 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004452 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004453 *
4454 * Returns a suitable unique value for a new device interface
4455 * number. The caller must hold the rtnl semaphore or the
4456 * dev_base_lock to be sure it remains unique.
4457 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004458static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459{
4460 static int ifindex;
4461 for (;;) {
4462 if (++ifindex <= 0)
4463 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004464 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465 return ifindex;
4466 }
4467}
4468
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004470static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004472static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004474 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475}
4476
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004477static void rollback_registered(struct net_device *dev)
4478{
4479 BUG_ON(dev_boot_phase);
4480 ASSERT_RTNL();
4481
4482	/* Some devices call this without ever having registered, to unwind initialization. */
4483 if (dev->reg_state == NETREG_UNINITIALIZED) {
4484 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4485 "was registered\n", dev->name, dev);
4486
4487 WARN_ON(1);
4488 return;
4489 }
4490
4491 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4492
4493 /* If device is running, close it first. */
4494 dev_close(dev);
4495
4496 /* And unlink it from device chain. */
4497 unlist_netdevice(dev);
4498
4499 dev->reg_state = NETREG_UNREGISTERING;
4500
4501 synchronize_net();
4502
4503 /* Shutdown queueing discipline. */
4504 dev_shutdown(dev);
4505
4506
4507	/* Notify protocols that we are about to destroy
4508	   this device. They should clean up all their state.
4509 */
4510 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4511
4512 /*
4513 * Flush the unicast and multicast chains
4514 */
4515 dev_addr_discard(dev);
4516
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004517 if (dev->netdev_ops->ndo_uninit)
4518 dev->netdev_ops->ndo_uninit(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004519
4520 /* Notifier chain MUST detach us from master device. */
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004521 WARN_ON(dev->master);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004522
4523 /* Remove entries from kobject tree */
4524 netdev_unregister_kobject(dev);
4525
4526 synchronize_net();
4527
4528 dev_put(dev);
4529}
4530
David S. Millere8a04642008-07-17 00:34:19 -07004531static void __netdev_init_queue_locks_one(struct net_device *dev,
4532 struct netdev_queue *dev_queue,
4533 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004534{
4535 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004536 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004537 dev_queue->xmit_lock_owner = -1;
4538}
4539
4540static void netdev_init_queue_locks(struct net_device *dev)
4541{
David S. Millere8a04642008-07-17 00:34:19 -07004542 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4543 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004544}
4545
Herbert Xub63365a2008-10-23 01:11:29 -07004546unsigned long netdev_fix_features(unsigned long features, const char *name)
4547{
4548 /* Fix illegal SG+CSUM combinations. */
4549 if ((features & NETIF_F_SG) &&
4550 !(features & NETIF_F_ALL_CSUM)) {
4551 if (name)
4552 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4553 "checksum feature.\n", name);
4554 features &= ~NETIF_F_SG;
4555 }
4556
4557 /* TSO requires that SG is present as well. */
4558 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4559 if (name)
4560 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4561 "SG feature.\n", name);
4562 features &= ~NETIF_F_TSO;
4563 }
4564
4565 if (features & NETIF_F_UFO) {
4566 if (!(features & NETIF_F_GEN_CSUM)) {
4567 if (name)
4568 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4569 "since no NETIF_F_HW_CSUM feature.\n",
4570 name);
4571 features &= ~NETIF_F_UFO;
4572 }
4573
4574 if (!(features & NETIF_F_SG)) {
4575 if (name)
4576 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4577 "since no NETIF_F_SG feature.\n", name);
4578 features &= ~NETIF_F_UFO;
4579 }
4580 }
4581
4582 return features;
4583}
4584EXPORT_SYMBOL(netdev_fix_features);
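/*
 * Example: a driver toggling a feature bit might run the result through
 * netdev_fix_features() so that dependent bits are dropped consistently.
 * A sketch only; my_set_sg() is a hypothetical ethtool-style hook.
 */
#if 0
static int my_set_sg(struct net_device *dev, u32 data)
{
	unsigned long features = dev->features;

	if (data)
		features |= NETIF_F_SG;
	else
		features &= ~NETIF_F_SG;

	/* e.g. NETIF_F_TSO is stripped again here if SG just went away */
	dev->features = netdev_fix_features(features, dev->name);
	return 0;
}
#endif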
4585
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586/**
4587 * register_netdevice - register a network device
4588 * @dev: device to register
4589 *
4590 * Take a completed network device structure and add it to the kernel
4591 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4592 * chain. 0 is returned on success. A negative errno code is returned
4593 * on a failure to set up the device, or if the name is a duplicate.
4594 *
4595 * Callers must hold the rtnl semaphore. You may want
4596 * register_netdev() instead of this.
4597 *
4598 * BUGS:
4599 * The locking appears insufficient to guarantee two parallel registers
4600 * will not get the same name.
4601 */
4602
4603int register_netdevice(struct net_device *dev)
4604{
4605 struct hlist_head *head;
4606 struct hlist_node *p;
4607 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004608 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609
4610 BUG_ON(dev_boot_phase);
4611 ASSERT_RTNL();
4612
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004613 might_sleep();
4614
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615 /* When net_device's are persistent, this will be fatal. */
4616 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004617 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004618
David S. Millerf1f28aa2008-07-15 00:08:33 -07004619 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004620 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004621 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004622
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623 dev->iflink = -1;
4624
4625 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004626 if (dev->netdev_ops->ndo_init) {
4627 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004628 if (ret) {
4629 if (ret > 0)
4630 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004631 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632 }
4633 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004634
Linus Torvalds1da177e2005-04-16 15:20:36 -07004635 if (!dev_valid_name(dev->name)) {
4636 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004637 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638 }
4639
Eric W. Biederman881d9662007-09-17 11:56:21 -07004640 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641 if (dev->iflink == -1)
4642 dev->iflink = dev->ifindex;
4643
4644 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004645 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004646 hlist_for_each(p, head) {
4647 struct net_device *d
4648 = hlist_entry(p, struct net_device, name_hlist);
4649 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4650 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004651 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004653 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004655 /* Fix illegal checksum combinations */
4656 if ((dev->features & NETIF_F_HW_CSUM) &&
4657 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4658 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4659 dev->name);
4660 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4661 }
4662
4663 if ((dev->features & NETIF_F_NO_CSUM) &&
4664 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4665 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4666 dev->name);
4667 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4668 }
4669
Herbert Xub63365a2008-10-23 01:11:29 -07004670 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004672 /* Enable software GSO if SG is supported. */
4673 if (dev->features & NETIF_F_SG)
4674 dev->features |= NETIF_F_GSO;
4675
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004676 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004677 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004678 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004679 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004680 dev->reg_state = NETREG_REGISTERED;
4681
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682 /*
4683	 *	Default initial state at registration is that the
4684 * device is present.
4685 */
4686
4687 set_bit(__LINK_STATE_PRESENT, &dev->state);
4688
Linus Torvalds1da177e2005-04-16 15:20:36 -07004689 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004690 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004691 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004692
4693	/* Notify protocols that a new device has appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004694 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004695 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004696 if (ret) {
4697 rollback_registered(dev);
4698 dev->reg_state = NETREG_UNREGISTERED;
4699 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004700
4701out:
4702 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004703
4704err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004705 if (dev->netdev_ops->ndo_uninit)
4706 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004707 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708}
4709
4710/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004711 * init_dummy_netdev - init a dummy network device for NAPI
4712 * @dev: device to init
4713 *
4714 * This takes a network device structure and initializes the minimum
4715 * number of fields so it can be used to schedule NAPI polls without
4716 * registering a full-blown interface. This is to be used by drivers
4717 * that need to tie several hardware interfaces to a single NAPI
4718 * poll scheduler due to HW limitations.
4719 */
4720int init_dummy_netdev(struct net_device *dev)
4721{
4722 /* Clear everything. Note we don't initialize spinlocks
4723	 * as they aren't supposed to be taken by any of the
4724 * NAPI code and this dummy netdev is supposed to be
4725 * only ever used for NAPI polls
4726 */
4727 memset(dev, 0, sizeof(struct net_device));
4728
4729 /* make sure we BUG if trying to hit standard
4730 * register/unregister code path
4731 */
4732 dev->reg_state = NETREG_DUMMY;
4733
4734 /* initialize the ref count */
4735 atomic_set(&dev->refcnt, 1);
4736
4737 /* NAPI wants this */
4738 INIT_LIST_HEAD(&dev->napi_list);
4739
4740 /* a dummy interface is started by default */
4741 set_bit(__LINK_STATE_PRESENT, &dev->state);
4742 set_bit(__LINK_STATE_START, &dev->state);
4743
4744 return 0;
4745}
4746EXPORT_SYMBOL_GPL(init_dummy_netdev);
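/*
 * Example of the intended use: one dummy netdev anchoring NAPI contexts
 * for hardware channels that have no netdev of their own. A sketch; the
 * my_hw/my_poll names and the weight of 64 are assumptions.
 */
#if 0
struct my_hw {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	/* ... service the hardware channel, then: */
	napi_complete(napi);
	return 0;
}

static void my_hw_init(struct my_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, my_poll, 64);
	napi_enable(&hw->napi);
}
#endif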
4747
4748
4749/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004750 * register_netdev - register a network device
4751 * @dev: device to register
4752 *
4753 * Take a completed network device structure and add it to the kernel
4754 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4755 * chain. 0 is returned on success. A negative errno code is returned
4756 * on a failure to set up the device, or if the name is a duplicate.
4757 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004758 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004759 * and expands the device name if you passed a format string to
4760 * alloc_netdev.
4761 */
4762int register_netdev(struct net_device *dev)
4763{
4764 int err;
4765
4766 rtnl_lock();
4767
4768 /*
4769 * If the name is a format string the caller wants us to do a
4770 * name allocation.
4771 */
4772 if (strchr(dev->name, '%')) {
4773 err = dev_alloc_name(dev, dev->name);
4774 if (err < 0)
4775 goto out;
4776 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004777
Linus Torvalds1da177e2005-04-16 15:20:36 -07004778 err = register_netdevice(dev);
4779out:
4780 rtnl_unlock();
4781 return err;
4782}
4783EXPORT_SYMBOL(register_netdev);
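/*
 * Example: the canonical allocate-then-register pattern this wrapper is
 * built for. A sketch; the snet names are hypothetical and ether_setup()
 * stands in for a real setup callback.
 */
#if 0
struct snet_priv {
	spinlock_t lock;	/* driver-private state would live here */
};

static struct net_device *snet_probe(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct snet_priv), "snet%d", ether_setup);
	if (!dev)
		return NULL;
	/* register_netdev() expands the '%d' via dev_alloc_name() */
	if (register_netdev(dev)) {
		free_netdev(dev);
		return NULL;
	}
	return dev;
}
#endif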
4784
4785/*
4786 * netdev_wait_allrefs - wait until all references are gone.
4787 *
4788 * This is called when unregistering network devices.
4789 *
4790 * Any protocol or device that holds a reference should register
4791 * for netdevice notification, and clean up and release the
4792 * reference if they receive an UNREGISTER event.
4793 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004794 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795 */
4796static void netdev_wait_allrefs(struct net_device *dev)
4797{
4798 unsigned long rebroadcast_time, warning_time;
4799
4800 rebroadcast_time = warning_time = jiffies;
4801 while (atomic_read(&dev->refcnt) != 0) {
4802 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004803 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804
4805 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004806 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004807
4808 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4809 &dev->state)) {
4810 /* We must not have linkwatch events
4811 * pending on unregister. If this
4812 * happens, we simply run the queue
4813 * unscheduled, resulting in a noop
4814 * for this device.
4815 */
4816 linkwatch_run_queue();
4817 }
4818
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004819 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004820
4821 rebroadcast_time = jiffies;
4822 }
4823
4824 msleep(250);
4825
4826 if (time_after(jiffies, warning_time + 10 * HZ)) {
4827 printk(KERN_EMERG "unregister_netdevice: "
4828 "waiting for %s to become free. Usage "
4829 "count = %d\n",
4830 dev->name, atomic_read(&dev->refcnt));
4831 warning_time = jiffies;
4832 }
4833 }
4834}
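/*
 * Example of the contract described above: a subsystem caching a device
 * reference registers a notifier and drops the reference on
 * NETDEV_UNREGISTER, letting netdev_wait_allrefs() finish. A sketch with
 * hypothetical my_* names; registration would happen once at init via
 * register_netdevice_notifier().
 */
#if 0
static struct net_device *my_cached_dev;	/* holds a dev_hold() ref */

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == my_cached_dev) {
		my_cached_dev = NULL;
		dev_put(dev);	/* without this, the wait loop above spins */
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_notifier = {
	.notifier_call = my_netdev_event,
};
#endif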
4835
4836/* The sequence is:
4837 *
4838 * rtnl_lock();
4839 * ...
4840 * register_netdevice(x1);
4841 * register_netdevice(x2);
4842 * ...
4843 * unregister_netdevice(y1);
4844 * unregister_netdevice(y2);
4845 * ...
4846 * rtnl_unlock();
4847 * free_netdev(y1);
4848 * free_netdev(y2);
4849 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07004850 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07004851 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004852 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07004853 * without deadlocking with linkwatch via keventd.
4854 * 2) Since we run with the RTNL semaphore not held, we can sleep
4855 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07004856 *
4857 * We must not return until all unregister events added during
4858 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004860void netdev_run_todo(void)
4861{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004862 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004863
Linus Torvalds1da177e2005-04-16 15:20:36 -07004864 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004865 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07004866
4867 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004868
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869 while (!list_empty(&list)) {
4870 struct net_device *dev
4871 = list_entry(list.next, struct net_device, todo_list);
4872 list_del(&dev->todo_list);
4873
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004874 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004875 printk(KERN_ERR "network todo '%s' but state %d\n",
4876 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004877 dump_stack();
4878 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004879 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004880
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004881 dev->reg_state = NETREG_UNREGISTERED;
4882
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004883 on_each_cpu(flush_backlog, dev, 1);
4884
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004885 netdev_wait_allrefs(dev);
4886
4887 /* paranoia */
4888 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004889 WARN_ON(dev->ip_ptr);
4890 WARN_ON(dev->ip6_ptr);
4891 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004892
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004893 if (dev->destructor)
4894 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07004895
4896 /* Free network device */
4897 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004898 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004899}
4900
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08004901/**
4902 * dev_get_stats - get network device statistics
4903 * @dev: device to get statistics from
4904 *
4905 * Get network statistics from device. The device driver may provide
4906 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
4907 * the internal statistics structure is used.
4908 */
4909const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00004910{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08004911 const struct net_device_ops *ops = dev->netdev_ops;
4912
4913 if (ops->ndo_get_stats)
4914 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00004915 else {
4916 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
4917 struct net_device_stats *stats = &dev->stats;
4918 unsigned int i;
4919 struct netdev_queue *txq;
4920
4921 for (i = 0; i < dev->num_tx_queues; i++) {
4922 txq = netdev_get_tx_queue(dev, i);
4923 tx_bytes += txq->tx_bytes;
4924 tx_packets += txq->tx_packets;
4925 tx_dropped += txq->tx_dropped;
4926 }
4927 if (tx_bytes || tx_packets || tx_dropped) {
4928 stats->tx_bytes = tx_bytes;
4929 stats->tx_packets = tx_packets;
4930 stats->tx_dropped = tx_dropped;
4931 }
4932 return stats;
4933 }
Rusty Russellc45d2862007-03-28 14:29:08 -07004934}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08004935EXPORT_SYMBOL(dev_get_stats);
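/*
 * Example: a driver overriding the default above by filling in
 * ndo_get_stats. A sketch; my_read_hw_rx_drops() is a hypothetical
 * hardware-counter read.
 */
#if 0
static unsigned long my_read_hw_rx_drops(struct net_device *dev);

static struct net_device_stats *my_get_stats(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;

	/* fold a hardware drop counter into the software stats */
	stats->rx_dropped += my_read_hw_rx_drops(dev);
	return stats;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_get_stats	= my_get_stats,
};
#endif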
Rusty Russellc45d2862007-03-28 14:29:08 -07004936
David S. Millerdc2b4842008-07-08 17:18:23 -07004937static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07004938 struct netdev_queue *queue,
4939 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07004940{
David S. Millerdc2b4842008-07-08 17:18:23 -07004941 queue->dev = dev;
4942}
4943
David S. Millerbb949fb2008-07-08 16:55:56 -07004944static void netdev_init_queues(struct net_device *dev)
4945{
David S. Millere8a04642008-07-17 00:34:19 -07004946 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4947 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07004948 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07004949}
4950
Linus Torvalds1da177e2005-04-16 15:20:36 -07004951/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004952 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953 * @sizeof_priv: size of private data to allocate space for
4954 * @name: device name format string
4955 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004956 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07004957 *
4958 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004959 * and performs basic initialization. Also allocates subqueue structs
4960 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004961 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004962struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4963 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964{
David S. Millere8a04642008-07-17 00:34:19 -07004965 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004966 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07004967 size_t alloc_size;
David S. Millere8a04642008-07-17 00:34:19 -07004968 void *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004970 BUG_ON(strlen(name) >= sizeof(dev->name));
4971
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004972 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07004973 if (sizeof_priv) {
4974 /* ensure 32-byte alignment of private area */
4975 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4976 alloc_size += sizeof_priv;
4977 }
4978 /* ensure 32-byte alignment of whole construct */
4979 alloc_size += NETDEV_ALIGN_CONST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07004981 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004982 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004983 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984 return NULL;
4985 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986
Stephen Hemminger79439862008-07-21 13:28:44 -07004987 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07004988 if (!tx) {
4989 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4990 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00004991 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07004992 }
4993
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994 dev = (struct net_device *)
4995 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4996 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00004997
4998 if (dev_addr_init(dev))
4999 goto free_tx;
5000
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005001 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005002
David S. Millere8a04642008-07-17 00:34:19 -07005003 dev->_tx = tx;
5004 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005005 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005006
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005007 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008
David S. Millerbb949fb2008-07-08 16:55:56 -07005009 netdev_init_queues(dev);
5010
Herbert Xud565b0a2008-12-15 23:38:52 -08005011 INIT_LIST_HEAD(&dev->napi_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005012 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005013 setup(dev);
5014 strcpy(dev->name, name);
5015 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005016
5017free_tx:
5018 kfree(tx);
5019
5020free_p:
5021 kfree(p);
5022 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005024EXPORT_SYMBOL(alloc_netdev_mq);
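/*
 * Example: allocating an eight-queue device and reaching the private
 * area. A sketch; the mq names and queue count are assumptions.
 */
#if 0
struct mq_priv {
	spinlock_t lock;
};

static void mq_setup(struct net_device *dev)
{
	ether_setup(dev);
}

static struct net_device *mq_alloc(void)
{
	struct net_device *dev;
	struct mq_priv *priv;

	dev = alloc_netdev_mq(sizeof(struct mq_priv), "mq%d", mq_setup, 8);
	if (!dev)
		return NULL;
	/* dev->num_tx_queues and real_num_tx_queues are now 8 */
	priv = netdev_priv(dev);
	spin_lock_init(&priv->lock);
	return dev;
}
#endif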
Linus Torvalds1da177e2005-04-16 15:20:36 -07005025
5026/**
5027 * free_netdev - free network device
5028 * @dev: device
5029 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005030 * This function does the last stage of destroying an allocated device
5031 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032 * If this is the last reference then it will be freed.
5033 */
5034void free_netdev(struct net_device *dev)
5035{
Herbert Xud565b0a2008-12-15 23:38:52 -08005036 struct napi_struct *p, *n;
5037
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005038 release_net(dev_net(dev));
5039
David S. Millere8a04642008-07-17 00:34:19 -07005040 kfree(dev->_tx);
5041
Jiri Pirkof001fde2009-05-05 02:48:28 +00005042 /* Flush device addresses */
5043 dev_addr_flush(dev);
5044
Herbert Xud565b0a2008-12-15 23:38:52 -08005045 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5046 netif_napi_del(p);
5047
Stephen Hemminger3041a062006-05-26 13:25:24 -07005048 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005049 if (dev->reg_state == NETREG_UNINITIALIZED) {
5050 kfree((char *)dev - dev->padded);
5051 return;
5052 }
5053
5054 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5055 dev->reg_state = NETREG_RELEASED;
5056
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005057 /* will free via device release */
5058 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005059}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005060
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005061/**
5062 * synchronize_net - Synchronize with packet receive processing
5063 *
5064 * Wait for packets currently being received to be done.
5065 * Does not block later packets from starting.
5066 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005067void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068{
5069 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005070 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071}
5072
5073/**
5074 * unregister_netdevice - remove device from the kernel
5075 * @dev: device
5076 *
5077 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005078 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005079 *
5080 * Callers must hold the rtnl semaphore. You may want
5081 * unregister_netdev() instead of this.
5082 */
5083
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08005084void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005085{
Herbert Xua6620712007-12-12 19:21:56 -08005086 ASSERT_RTNL();
5087
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005088 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005089 /* Finish processing unregister after unlock */
5090 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091}
5092
5093/**
5094 * unregister_netdev - remove device from the kernel
5095 * @dev: device
5096 *
5097 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005098 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005099 *
5100 * This is just a wrapper for unregister_netdevice that takes
5101 * the rtnl semaphore. In general you want to use this and not
5102 * unregister_netdevice.
5103 */
5104void unregister_netdev(struct net_device *dev)
5105{
5106 rtnl_lock();
5107 unregister_netdevice(dev);
5108 rtnl_unlock();
5109}
5110
5111EXPORT_SYMBOL(unregister_netdev);
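/*
 * Example: the matching teardown order for the registration pattern
 * shown earlier. A sketch; unregistering must complete before the
 * structure is freed.
 */
#if 0
static void snet_remove(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes rtnl and waits for references */
	free_netdev(dev);	/* safe only after unregistration finishes */
}
#endif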
5112
Eric W. Biedermance286d32007-09-12 13:53:49 +02005113/**
5114 * dev_change_net_namespace - move device to different nethost namespace
5115 * @dev: device
5116 * @net: network namespace
5117 * @pat: If not NULL name pattern to try if the current device name
5118 * is already taken in the destination network namespace.
5119 *
5120 * This function shuts down a device interface and moves it
5121 * to a new network namespace. On success 0 is returned, on
5122 * failure a negative errno code is returned.
5123 *
5124 * Callers must hold the rtnl semaphore.
5125 */
5126
5127int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5128{
5129 char buf[IFNAMSIZ];
5130 const char *destname;
5131 int err;
5132
5133 ASSERT_RTNL();
5134
5135 /* Don't allow namespace local devices to be moved. */
5136 err = -EINVAL;
5137 if (dev->features & NETIF_F_NETNS_LOCAL)
5138 goto out;
5139
Eric W. Biederman38918452008-10-27 17:51:47 -07005140#ifdef CONFIG_SYSFS
5141 /* Don't allow real devices to be moved when sysfs
5142 * is enabled.
5143 */
5144 err = -EINVAL;
5145 if (dev->dev.parent)
5146 goto out;
5147#endif
5148
Eric W. Biedermance286d32007-09-12 13:53:49 +02005149	/* Ensure the device has been registered */
5150 err = -EINVAL;
5151 if (dev->reg_state != NETREG_REGISTERED)
5152 goto out;
5153
5154	/* Get out if there is nothing to do */
5155 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005156 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005157 goto out;
5158
5159 /* Pick the destination device name, and ensure
5160 * we can use it in the destination network namespace.
5161 */
5162 err = -EEXIST;
5163 destname = dev->name;
5164 if (__dev_get_by_name(net, destname)) {
5165 /* We get here if we can't use the current device name */
5166 if (!pat)
5167 goto out;
5168 if (!dev_valid_name(pat))
5169 goto out;
5170 if (strchr(pat, '%')) {
5171 if (__dev_alloc_name(net, pat, buf) < 0)
5172 goto out;
5173 destname = buf;
5174 } else
5175 destname = pat;
5176 if (__dev_get_by_name(net, destname))
5177 goto out;
5178 }
5179
5180 /*
5181	 * And now a mini version of register_netdevice and unregister_netdevice.
5182 */
5183
5184	/* If device is running, close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005185 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005186
5187 /* And unlink it from device chain */
5188 err = -ENODEV;
5189 unlist_netdevice(dev);
5190
5191 synchronize_net();
5192
5193 /* Shutdown queueing discipline. */
5194 dev_shutdown(dev);
5195
5196	/* Notify protocols that we are about to destroy
5197	   this device. They should clean up all their state.
5198 */
5199 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5200
5201 /*
5202 * Flush the unicast and multicast chains
5203 */
5204 dev_addr_discard(dev);
5205
Eric W. Biederman38918452008-10-27 17:51:47 -07005206 netdev_unregister_kobject(dev);
5207
Eric W. Biedermance286d32007-09-12 13:53:49 +02005208 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005209 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005210
5211 /* Assign the new device name */
5212 if (destname != dev->name)
5213 strcpy(dev->name, destname);
5214
5215 /* If there is an ifindex conflict assign a new one */
5216 if (__dev_get_by_index(net, dev->ifindex)) {
5217 int iflink = (dev->iflink == dev->ifindex);
5218 dev->ifindex = dev_new_index(net);
5219 if (iflink)
5220 dev->iflink = dev->ifindex;
5221 }
5222
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005223 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005224 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005225 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005226
5227 /* Add the device back in the hashes */
5228 list_netdevice(dev);
5229
5230	/* Notify protocols that a new device has appeared. */
5231 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5232
5233 synchronize_net();
5234 err = 0;
5235out:
5236 return err;
5237}
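/*
 * Example: moving a device under rtnl_lock with a fallback name pattern
 * for the destination namespace. A sketch; "veth%d" is an assumed
 * pattern and @net an already-held namespace reference.
 */
#if 0
static int my_move_dev(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "veth%d");
	rtnl_unlock();
	return err;
}
#endif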
5238
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239static int dev_cpu_callback(struct notifier_block *nfb,
5240 unsigned long action,
5241 void *ocpu)
5242{
5243 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005244 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245 struct sk_buff *skb;
5246 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5247 struct softnet_data *sd, *oldsd;
5248
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005249 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005250 return NOTIFY_OK;
5251
5252 local_irq_disable();
5253 cpu = smp_processor_id();
5254 sd = &per_cpu(softnet_data, cpu);
5255 oldsd = &per_cpu(softnet_data, oldcpu);
5256
5257 /* Find end of our completion_queue. */
5258 list_skb = &sd->completion_queue;
5259 while (*list_skb)
5260 list_skb = &(*list_skb)->next;
5261 /* Append completion queue from offline CPU. */
5262 *list_skb = oldsd->completion_queue;
5263 oldsd->completion_queue = NULL;
5264
5265 /* Find end of our output_queue. */
5266 list_net = &sd->output_queue;
5267 while (*list_net)
5268 list_net = &(*list_net)->next_sched;
5269 /* Append output queue from offline CPU. */
5270 *list_net = oldsd->output_queue;
5271 oldsd->output_queue = NULL;
5272
5273 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5274 local_irq_enable();
5275
5276 /* Process offline CPU's input_pkt_queue */
5277 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5278 netif_rx(skb);
5279
5280 return NOTIFY_OK;
5281}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005282
5283
Herbert Xu7f353bf2007-08-10 15:47:58 -07005284/**
Herbert Xub63365a2008-10-23 01:11:29 -07005285 * netdev_increment_features - increment feature set by one
5286 * @all: current feature set
5287 * @one: new feature set
5288 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005289 *
5290 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005291 * @one to the master device with current feature set @all. Will not
5292 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005293 */
Herbert Xub63365a2008-10-23 01:11:29 -07005294unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5295 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005296{
Herbert Xub63365a2008-10-23 01:11:29 -07005297 /* If device needs checksumming, downgrade to it. */
5298 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5299 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5300 else if (mask & NETIF_F_ALL_CSUM) {
5301 /* If one device supports v4/v6 checksumming, set for all. */
5302 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5303 !(all & NETIF_F_GEN_CSUM)) {
5304 all &= ~NETIF_F_ALL_CSUM;
5305 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5306 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005307
Herbert Xub63365a2008-10-23 01:11:29 -07005308 /* If one device supports hw checksumming, set for all. */
5309 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5310 all &= ~NETIF_F_ALL_CSUM;
5311 all |= NETIF_F_HW_CSUM;
5312 }
5313 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005314
Herbert Xub63365a2008-10-23 01:11:29 -07005315 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005316
Herbert Xub63365a2008-10-23 01:11:29 -07005317 one |= all & NETIF_F_ONE_FOR_ALL;
5318 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
5319 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005320
5321 return all;
5322}
Herbert Xub63365a2008-10-23 01:11:29 -07005323EXPORT_SYMBOL(netdev_increment_features);
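/*
 * Example: how a bonding-style master might fold each slave into its
 * feature set. A sketch; the my_slave list layout, the starting set and
 * the mask choice are assumptions, not the bonding driver's actual code.
 */
#if 0
struct my_slave {
	struct list_head list;
	struct net_device *dev;
};

static unsigned long my_master_features(struct list_head *slaves)
{
	struct my_slave *s;
	unsigned long all = NETIF_F_ONE_FOR_ALL | NETIF_F_ALL_CSUM;

	list_for_each_entry(s, slaves, list)
		all = netdev_increment_features(all, s->dev->features,
						NETIF_F_ONE_FOR_ALL);
	return all;
}
#endif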
Herbert Xu7f353bf2007-08-10 15:47:58 -07005324
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005325static struct hlist_head *netdev_create_hash(void)
5326{
5327 int i;
5328 struct hlist_head *hash;
5329
5330 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5331 if (hash != NULL)
5332 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5333 INIT_HLIST_HEAD(&hash[i]);
5334
5335 return hash;
5336}
5337
Eric W. Biederman881d9662007-09-17 11:56:21 -07005338/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005339static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005340{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005341 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005342
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005343 net->dev_name_head = netdev_create_hash();
5344 if (net->dev_name_head == NULL)
5345 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005346
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005347 net->dev_index_head = netdev_create_hash();
5348 if (net->dev_index_head == NULL)
5349 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005350
5351 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005352
5353err_idx:
5354 kfree(net->dev_name_head);
5355err_name:
5356 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005357}
5358
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005359/**
5360 * netdev_drivername - network driver for the device
5361 * @dev: network device
5362 * @buffer: buffer for resulting name
5363 * @len: size of buffer
5364 *
5365 * Determine network driver for device.
5366 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005367char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005368{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005369 const struct device_driver *driver;
5370 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005371
5372 if (len <= 0 || !buffer)
5373 return buffer;
5374 buffer[0] = 0;
5375
5376 parent = dev->dev.parent;
5377
5378 if (!parent)
5379 return buffer;
5380
5381 driver = parent->driver;
5382 if (driver && driver->name)
5383 strlcpy(buffer, driver->name, len);
5384 return buffer;
5385}
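/*
 * Example: typical diagnostic use, as in the transmit watchdog. A
 * sketch; the message text is illustrative.
 */
#if 0
static void my_report_tx_hang(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_WARNING "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
	       dev->name, netdev_drivername(dev, drivername, 64));
}
#endif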
5386
Pavel Emelyanov46650792007-10-08 20:38:39 -07005387static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005388{
5389 kfree(net->dev_name_head);
5390 kfree(net->dev_index_head);
5391}
5392
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005393static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005394 .init = netdev_init,
5395 .exit = netdev_exit,
5396};
5397
Pavel Emelyanov46650792007-10-08 20:38:39 -07005398static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005399{
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005400 struct net_device *dev;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005401 /*
5402	 * Push all migratable network devices back to the
5403 * initial network namespace
5404 */
5405 rtnl_lock();
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005406restart:
5407 for_each_netdev(net, dev) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005408 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005409 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005410
5411		/* Ignore unmovable devices (e.g. loopback) */
5412 if (dev->features & NETIF_F_NETNS_LOCAL)
5413 continue;
5414
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005415 /* Delete virtual devices */
5416 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
5417 dev->rtnl_link_ops->dellink(dev);
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005418 goto restart;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005419 }
5420
Eric W. Biedermance286d32007-09-12 13:53:49 +02005421		/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005422 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5423 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005424 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005425 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005426 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005427 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005428 }
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005429 goto restart;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005430 }
5431 rtnl_unlock();
5432}
5433
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005434static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005435 .exit = default_device_exit,
5436};
5437
Linus Torvalds1da177e2005-04-16 15:20:36 -07005438/*
5439 * Initialize the DEV module. At boot time this walks the device list and
5440 * unhooks any devices that fail to initialise (normally hardware not
5441 * present) and leaves us with a valid list of present and active devices.
5442 *
5443 */
5444
5445/*
5446 * This is called single threaded during boot, so no need
5447 * to take the rtnl semaphore.
5448 */
5449static int __init net_dev_init(void)
5450{
5451 int i, rc = -ENOMEM;
5452
5453 BUG_ON(!dev_boot_phase);
5454
Linus Torvalds1da177e2005-04-16 15:20:36 -07005455 if (dev_proc_init())
5456 goto out;
5457
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005458 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005459 goto out;
5460
5461 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08005462 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005463 INIT_LIST_HEAD(&ptype_base[i]);
5464
Eric W. Biederman881d9662007-09-17 11:56:21 -07005465 if (register_pernet_subsys(&netdev_net_ops))
5466 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005467
5468 /*
5469 * Initialise the packet receive queues.
5470 */
5471
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005472 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005473 struct softnet_data *queue;
5474
5475 queue = &per_cpu(softnet_data, i);
5476 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005477 queue->completion_queue = NULL;
5478 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005479
5480 queue->backlog.poll = process_backlog;
5481 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005482 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005483 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005484 }
5485
Linus Torvalds1da177e2005-04-16 15:20:36 -07005486 dev_boot_phase = 0;
5487
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005488	/* The loopback device is special: if any other network device
5489	 * is present in a network namespace, the loopback device must
5490	 * be present too. Since we now dynamically allocate and free
5491	 * the loopback device, ensure this invariant is maintained by
5492	 * keeping the loopback device as the first device on the
5493	 * list of network devices: it must be the first device
5494	 * that appears and the last network device
5495	 * that disappears.
5496 */
5497 if (register_pernet_device(&loopback_net_ops))
5498 goto out;
5499
5500 if (register_pernet_device(&default_device_ops))
5501 goto out;
5502
Carlos R. Mafra962cf362008-05-15 11:15:37 -03005503 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5504 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005505
5506 hotcpu_notifier(dev_cpu_callback, 0);
5507 dst_init();
5508 dev_mcast_init();
5509 rc = 0;
5510out:
5511 return rc;
5512}
5513
5514subsys_initcall(net_dev_init);
5515
Krishna Kumare88721f2009-02-18 17:55:02 -08005516static int __init initialize_hashrnd(void)
5517{
5518 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5519 return 0;
5520}
5521
5522late_initcall_sync(initialize_hashrnd);
5523
Linus Torvalds1da177e2005-04-16 15:20:36 -07005524EXPORT_SYMBOL(__dev_get_by_index);
5525EXPORT_SYMBOL(__dev_get_by_name);
5526EXPORT_SYMBOL(__dev_remove_pack);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08005527EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005528EXPORT_SYMBOL(dev_add_pack);
5529EXPORT_SYMBOL(dev_alloc_name);
5530EXPORT_SYMBOL(dev_close);
5531EXPORT_SYMBOL(dev_get_by_flags);
5532EXPORT_SYMBOL(dev_get_by_index);
5533EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005534EXPORT_SYMBOL(dev_open);
5535EXPORT_SYMBOL(dev_queue_xmit);
5536EXPORT_SYMBOL(dev_remove_pack);
5537EXPORT_SYMBOL(dev_set_allmulti);
5538EXPORT_SYMBOL(dev_set_promiscuity);
5539EXPORT_SYMBOL(dev_change_flags);
5540EXPORT_SYMBOL(dev_set_mtu);
5541EXPORT_SYMBOL(dev_set_mac_address);
5542EXPORT_SYMBOL(free_netdev);
5543EXPORT_SYMBOL(netdev_boot_setup_check);
5544EXPORT_SYMBOL(netdev_set_master);
5545EXPORT_SYMBOL(netdev_state_change);
5546EXPORT_SYMBOL(netif_receive_skb);
5547EXPORT_SYMBOL(netif_rx);
5548EXPORT_SYMBOL(register_gifconf);
5549EXPORT_SYMBOL(register_netdevice);
5550EXPORT_SYMBOL(register_netdevice_notifier);
5551EXPORT_SYMBOL(skb_checksum_help);
5552EXPORT_SYMBOL(synchronize_net);
5553EXPORT_SYMBOL(unregister_netdevice);
5554EXPORT_SYMBOL(unregister_netdevice_notifier);
5555EXPORT_SYMBOL(net_enable_timestamp);
5556EXPORT_SYMBOL(net_disable_timestamp);
5557EXPORT_SYMBOL(dev_get_flags);
5558
5559#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
5560EXPORT_SYMBOL(br_handle_frame_hook);
5561EXPORT_SYMBOL(br_fdb_get_hook);
5562EXPORT_SYMBOL(br_fdb_put_hook);
5563#endif
5564
Linus Torvalds1da177e2005-04-16 15:20:36 -07005565EXPORT_SYMBOL(dev_load);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005566
5567EXPORT_PER_CPU_SYMBOL(softnet_data);