/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	Device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass;
 *					saved a few bytes in the ioctl handler.
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call per packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer:	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug fix: a network device unload needs
 *					to purge the backlog queue.
 *		Paul Rusty Russell:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
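
/*
 * Worked example of the low-nibble hash used below in dev_add_pack():
 * the bucket index is (ntohs(pt->type) & PTYPE_HASH_MASK).  With 16
 * buckets,
 *
 *	RARP  0x8035 & 0xf = 5
 *	SNAP  0x0005 & 0xf = 5
 *	X.25  0x0805 & 0xf = 5
 *
 * which is the overlap mentioned above, and VLAN (0x8100 & 0xf = 0)
 * now also collides with IP (0x0800 & 0xf = 0), as the NOTE observes.
 */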

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See register_netdevice() and unregister_netdevice() for example usages;
 * both must be called with the rtnl semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	is first on the list, it cannot sense that the packet is cloned
 *	and should be copied-on-write; it would change the packet and
 *	subsequent readers would get a broken packet.
 *	--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
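
/*
 * Illustrative sketch (not part of this file): a minimal tap pairing
 * dev_add_pack() with dev_remove_pack().  The names example_rcv and
 * example_pt are hypothetical; the .func signature matches the one
 * struct packet_type expects.  Using ETH_P_ALL puts the handler on the
 * promiscuous ptype_all chain, like af_packet does.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		(a real handler would consume skb)
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_pt __read_mostly = {
 *		.type	= htons(ETH_P_ALL),
 *		.func	= example_rcv,
 *	};
 *
 *	dev_add_pack(&example_pt);	(e.g. at module init; does not sleep)
 *	dev_remove_pack(&example_pt);	(at module exit; may sleep)
 */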

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If the device is already registered, then return a base of 1
	 * to indicate not to probe for this interface.
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
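
/*
 * Worked example of the parsing above (illustrative): a command line of
 *
 *	netdev=5,0x300,0,0,eth0
 *
 * stores irq=5 and base_addr=0x300 (mem_start/mem_end left at 0) under
 * the name "eth0"; netdev_boot_setup_check() later copies those values
 * into the matching net_device during probing.
 */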

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
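
/*
 * Usage sketch (illustrative, not part of this file): the two lookups
 * differ only in who manages locking and the reference.
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		(use dev; a reference is held for us)
 *		dev_put(dev);
 *	}
 *
 * whereas __dev_get_by_name() may only be called with RTNL or
 * dev_base_lock held, and returns a pointer without taking a reference.
 */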

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device
 *	if it is found, %NULL otherwise. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device
 *	if it is found, NULL otherwise. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device if it is found, NULL otherwise. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count
 *	increased and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer
 *	to the first matching device if one is found, NULL otherwise. The
 *	device returned has had a reference added and the pointer is safe
 *	until the user calls dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
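
/*
 * Examples for the rules above: "eth0", "wlan%d" and "my-vlan.10" are
 * accepted, while "", ".", "..", "a/b", "a b" and any name of
 * IFNAMSIZ (16) or more characters are rejected.
 */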

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be exactly one "%d" and no
		 * other "%" characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
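
/*
 * Worked example (illustrative): with "eth0" and "eth1" already
 * registered in the namespace, dev_alloc_name(dev, "eth%d") writes
 * "eth2" into dev->name and returns 2.  A plain name with no '%' is
 * only checked for uniqueness: 0 if it is free, -ENFILE if taken.
 */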


/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d" can
 *	be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	} else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from @alias
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len + 1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	It is only invoked if the device is UP.
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
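
/*
 * Usage sketch (illustrative): both calls require the RTNL semaphore,
 * mirroring what the dev_change_flags() path does for "ifconfig up" and
 * "ifconfig down".
 *
 *	rtnl_lock();
 *	err = dev_open(dev);	(device becomes IFF_UP on success)
 *	...
 *	dev_close(dev);		(always returns 0; may sleep)
 *	rtnl_unlock();
 */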


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
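
/*
 * Typical call site (illustrative): code that turns a device into a
 * packet forwarder, such as adding it to a bridge, calls this under
 * RTNL so that aggregated LRO super-packets are never forwarded as-is:
 *
 *	rtnl_lock();
 *	dev_disable_lro(dev);
 *	rtnl_unlock();
 */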


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed to the
 *	new notifier so that it has a race-free view of the network device
 *	list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
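
/*
 * Illustrative sketch (hypothetical names): a typical user registers a
 * notifier_block whose callback receives the affected net_device via
 * the ptr argument.
 *
 *	static int example_event(struct notifier_block *nb,
 *				 unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 *	...
 *	unregister_netdevice_notifier(&example_nb);
 */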

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp(skb);
#else
	net_timestamp(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by the sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
1381
Denis Vlasenko56079432006-03-29 15:57:29 -08001382
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001383static inline void __netif_reschedule(struct Qdisc *q)
1384{
1385 struct softnet_data *sd;
1386 unsigned long flags;
1387
1388 local_irq_save(flags);
1389 sd = &__get_cpu_var(softnet_data);
1390 q->next_sched = sd->output_queue;
1391 sd->output_queue = q;
1392 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1393 local_irq_restore(flags);
1394}
1395
David S. Miller37437bb2008-07-16 02:15:04 -07001396void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001397{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001398 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1399 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001400}
1401EXPORT_SYMBOL(__netif_schedule);
1402
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001403void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001404{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001405 if (atomic_dec_and_test(&skb->users)) {
1406 struct softnet_data *sd;
1407 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001408
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001409 local_irq_save(flags);
1410 sd = &__get_cpu_var(softnet_data);
1411 skb->next = sd->completion_queue;
1412 sd->completion_queue = skb;
1413 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1414 local_irq_restore(flags);
1415 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001416}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001417EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001418
1419void dev_kfree_skb_any(struct sk_buff *skb)
1420{
1421 if (in_irq() || irqs_disabled())
1422 dev_kfree_skb_irq(skb);
1423 else
1424 dev_kfree_skb(skb);
1425}
1426EXPORT_SYMBOL(dev_kfree_skb_any);
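
/*
 * Caller sketch (hypothetical foo_ driver): a TX-completion interrupt
 * handler must not use plain dev_kfree_skb(), so it frees the reclaimed
 * buffer with dev_kfree_skb_any(), which degrades to the _irq variant
 * in hardirq context. foo_reclaim_skb() is an assumed helper.
 */
static irqreturn_t foo_tx_done_irq(int irq, void *dev_id)
{
	struct sk_buff *skb = foo_reclaim_skb(dev_id);	/* hypothetical */

	if (skb)
		dev_kfree_skb_any(skb);	/* safe in hardirq context */
	return IRQ_HANDLED;
}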
1427
1428
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001429/**
1430 * netif_device_detach - mark device as removed
1431 * @dev: network device
1432 *
1433 * Mark device as removed from the system and therefore no longer available.
1434 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001435void netif_device_detach(struct net_device *dev)
1436{
1437 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1438 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001439 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001440 }
1441}
1442EXPORT_SYMBOL(netif_device_detach);
1443
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001444/**
1445 * netif_device_attach - mark device as attached
1446 * @dev: network device
1447 *
1448 * Mark device as attached to the system and restart its queues if needed.
1449 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001450void netif_device_attach(struct net_device *dev)
1451{
1452 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1453 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001454 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001455 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001456 }
1457}
1458EXPORT_SYMBOL(netif_device_attach);
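
/*
 * Typical use in a driver's PM hooks (illustrative only; the foo_*
 * names and the PCI wiring are assumptions, not taken from this file):
 * detach before powering down, attach on resume so the queues and the
 * watchdog restart.
 */
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_detach(netdev);
	return 0;
}

static int foo_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_attach(netdev);
	return 0;
}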
1459
Ben Hutchings6de329e2008-06-16 17:02:28 -07001460static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1461{
1462 return ((features & NETIF_F_GEN_CSUM) ||
1463 ((features & NETIF_F_IP_CSUM) &&
1464 protocol == htons(ETH_P_IP)) ||
1465 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001466 protocol == htons(ETH_P_IPV6)) ||
1467 ((features & NETIF_F_FCOE_CRC) &&
1468 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001469}
1470
1471static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1472{
1473 if (can_checksum_protocol(dev->features, skb->protocol))
1474 return true;
1475
1476 if (skb->protocol == htons(ETH_P_8021Q)) {
1477 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1478 if (can_checksum_protocol(dev->features & dev->vlan_features,
1479 veh->h_vlan_encapsulated_proto))
1480 return true;
1481 }
1482
1483 return false;
1484}
Denis Vlasenko56079432006-03-29 15:57:29 -08001485
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486/*
1487 * Invalidate hardware checksum when packet is to be mangled, and
1488 * complete checksum manually on outgoing path.
1489 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001490int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491{
Al Virod3bc23e2006-11-14 21:24:49 -08001492 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001493 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494
Patrick McHardy84fa7932006-08-29 16:44:56 -07001495 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001496 goto out_set_summed;
1497
1498 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001499 /* Let GSO fix up the checksum. */
1500 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 }
1502
Herbert Xua0308472007-10-15 01:47:15 -07001503 offset = skb->csum_start - skb_headroom(skb);
1504 BUG_ON(offset >= skb_headlen(skb));
1505 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1506
1507 offset += skb->csum_offset;
1508 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1509
1510 if (skb_cloned(skb) &&
1511 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1513 if (ret)
1514 goto out;
1515 }
1516
Herbert Xua0308472007-10-15 01:47:15 -07001517 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001518out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001520out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 return ret;
1522}
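
/*
 * Caller sketch (foo_resolve_csum() is an assumed helper; it mirrors
 * what dev_queue_xmit() does further below): before handing a
 * CHECKSUM_PARTIAL packet to hardware that cannot offload it, resolve
 * the checksum in software.
 */
static int foo_resolve_csum(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !dev_can_checksum(dev, skb))
		return skb_checksum_help(skb);	/* may fail on allocation */
	return 0;
}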
1523
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001524/**
1525 * skb_gso_segment - Perform segmentation on skb.
1526 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001527 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001528 *
1529 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001530 *
1531 * It may return NULL if the skb requires no segmentation. This is
1532 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001533 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001534struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001535{
1536 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1537 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001538 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001539 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001540
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001541 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001542 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001543 __skb_pull(skb, skb->mac_len);
1544
Herbert Xu67fd1a72009-01-19 16:26:44 -08001545 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1546 struct net_device *dev = skb->dev;
1547 struct ethtool_drvinfo info = {};
1548
1549 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1550 dev->ethtool_ops->get_drvinfo(dev, &info);
1551
1552 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1553 "ip_summed=%d",
1554 info.driver, dev ? dev->features : 0L,
1555 skb->sk ? skb->sk->sk_route_caps : 0L,
1556 skb->len, skb->data_len, skb->ip_summed);
1557
Herbert Xua430a432006-07-08 13:34:56 -07001558 if (skb_header_cloned(skb) &&
1559 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1560 return ERR_PTR(err);
1561 }
1562
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001563 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001564 list_for_each_entry_rcu(ptype,
1565 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001566 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001567 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001568 err = ptype->gso_send_check(skb);
1569 segs = ERR_PTR(err);
1570 if (err || skb_gso_ok(skb, features))
1571 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001572 __skb_push(skb, (skb->data -
1573 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001574 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001575 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001576 break;
1577 }
1578 }
1579 rcu_read_unlock();
1580
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001581 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001582
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001583 return segs;
1584}
1585
1586EXPORT_SYMBOL(skb_gso_segment);
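
/*
 * Consumer sketch (foo_xmit_one() is a hypothetical per-frame transmit
 * helper): software-segment a GSO skb and send each resulting segment,
 * roughly what dev_gso_segment()/dev_hard_start_xmit() do below. The
 * segments take their own page references, so the original skb can be
 * freed once segmentation succeeds.
 */
static int foo_xmit_gso(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *segs = skb_gso_segment(skb, dev->features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)				/* header check only */
		return foo_xmit_one(skb, dev);

	kfree_skb(skb);				/* segments hold the data now */
	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		foo_xmit_one(nskb, dev);
	}
	return 0;
}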
1587
Herbert Xufb286bb2005-11-10 13:01:24 -08001588/* Take action when hardware reception checksum errors are detected. */
1589#ifdef CONFIG_BUG
1590void netdev_rx_csum_fault(struct net_device *dev)
1591{
1592 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001593 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001594 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001595 dump_stack();
1596 }
1597}
1598EXPORT_SYMBOL(netdev_rx_csum_fault);
1599#endif
1600
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601/* Actually, we should eliminate this check as soon as we know that:
1602 * 1. An IOMMU is present and is able to map all of the memory.
1603 * 2. No high memory really exists on this machine.
1604 */
1605
1606static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1607{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001608#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609 int i;
1610
1611 if (dev->features & NETIF_F_HIGHDMA)
1612 return 0;
1613
1614 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1615 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1616 return 1;
1617
Herbert Xu3d3a8532006-06-27 13:33:10 -07001618#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 return 0;
1620}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001622struct dev_gso_cb {
1623 void (*destructor)(struct sk_buff *skb);
1624};
1625
1626#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1627
1628static void dev_gso_skb_destructor(struct sk_buff *skb)
1629{
1630 struct dev_gso_cb *cb;
1631
1632 do {
1633 struct sk_buff *nskb = skb->next;
1634
1635 skb->next = nskb->next;
1636 nskb->next = NULL;
1637 kfree_skb(nskb);
1638 } while (skb->next);
1639
1640 cb = DEV_GSO_CB(skb);
1641 if (cb->destructor)
1642 cb->destructor(skb);
1643}
1644
1645/**
1646 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1647 * @skb: buffer to segment
1648 *
1649 * This function segments the given skb and stores the list of segments
1650 * in skb->next.
1651 */
1652static int dev_gso_segment(struct sk_buff *skb)
1653{
1654 struct net_device *dev = skb->dev;
1655 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001656 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1657 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001658
Herbert Xu576a30e2006-06-27 13:22:38 -07001659 segs = skb_gso_segment(skb, features);
1660
1661 /* Verifying header integrity only. */
1662 if (!segs)
1663 return 0;
1664
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001665 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001666 return PTR_ERR(segs);
1667
1668 skb->next = segs;
1669 DEV_GSO_CB(skb)->destructor = skb->destructor;
1670 skb->destructor = dev_gso_skb_destructor;
1671
1672 return 0;
1673}
1674
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001675int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1676 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001677{
Stephen Hemminger00829822008-11-20 20:14:53 -08001678 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001679 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001680
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001681 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001682 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001683 dev_queue_xmit_nit(skb, dev);
1684
Herbert Xu576a30e2006-06-27 13:22:38 -07001685 if (netif_needs_gso(dev, skb)) {
1686 if (unlikely(dev_gso_segment(skb)))
1687 goto out_kfree_skb;
1688 if (skb->next)
1689 goto gso;
1690 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001691
Eric Dumazet93f154b2009-05-18 22:19:19 -07001692 /*
1693		 * If the device doesn't need skb->dst, release it right now while
1694		 * it's hot in this CPU's cache
1695 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001696 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1697 skb_dst_drop(skb);
1698
Patrick Ohlyac45f602009-02-12 05:03:37 +00001699 rc = ops->ndo_start_xmit(skb, dev);
Eric Dumazet08baf562009-05-25 22:58:01 -07001700 if (rc == 0)
1701 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001702 /*
1703 * TODO: if skb_orphan() was called by
1704 * dev->hard_start_xmit() (for example, the unmodified
1705 * igb driver does that; bnx2 doesn't), then
1706 * skb_tx_software_timestamp() will be unable to send
1707 * back the time stamp.
1708 *
1709 * How can this be prevented? Always create another
1710 * reference to the socket before calling
1711 * dev->hard_start_xmit()? Prevent that skb_orphan()
1712 * does anything in dev->hard_start_xmit() by clearing
1713 * the skb destructor before the call and restoring it
1714 * afterwards, then doing the skb_orphan() ourselves?
1715 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001716 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001717 }
1718
Herbert Xu576a30e2006-06-27 13:22:38 -07001719gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001720 do {
1721 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001722
1723 skb->next = nskb->next;
1724 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001725 rc = ops->ndo_start_xmit(nskb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001726 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001727 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001728 skb->next = nskb;
1729 return rc;
1730 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001731 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001732 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001733 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001734 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001735
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001736 skb->destructor = DEV_GSO_CB(skb)->destructor;
1737
1738out_kfree_skb:
1739 kfree_skb(skb);
1740 return 0;
1741}
1742
David S. Miller70192982009-01-27 16:34:47 -08001743static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001744
Stephen Hemminger92477442009-03-21 13:39:26 -07001745u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001746{
David S. Miller70192982009-01-27 16:34:47 -08001747 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001748
David S. Miller513de112009-05-03 14:43:10 -07001749 if (skb_rx_queue_recorded(skb)) {
1750 hash = skb_get_rx_queue(skb);
1751		while (unlikely(hash >= dev->real_num_tx_queues))
1752 hash -= dev->real_num_tx_queues;
1753 return hash;
1754 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001755
1756 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001757 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001758 else
David S. Miller70192982009-01-27 16:34:47 -08001759 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001760
David S. Miller70192982009-01-27 16:34:47 -08001761 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001762
David S. Millerb6b2fed2008-07-21 09:48:06 -07001763 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001764}
Stephen Hemminger92477442009-03-21 13:39:26 -07001765EXPORT_SYMBOL(skb_tx_hash);
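
/*
 * Usage sketch: a multiqueue driver with no private steering policy can
 * back its ->ndo_select_queue() with skb_tx_hash() to spread flows over
 * its real TX queues (the foo_ name is hypothetical).
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	return skb_tx_hash(dev, skb);
}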
David S. Miller8f0f2222008-07-15 03:47:03 -07001766
David S. Millere8a04642008-07-17 00:34:19 -07001767static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1768 struct sk_buff *skb)
1769{
Stephen Hemminger00829822008-11-20 20:14:53 -08001770 const struct net_device_ops *ops = dev->netdev_ops;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001771 u16 queue_index = 0;
1772
Stephen Hemminger00829822008-11-20 20:14:53 -08001773 if (ops->ndo_select_queue)
1774 queue_index = ops->ndo_select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001775 else if (dev->real_num_tx_queues > 1)
David S. Miller70192982009-01-27 16:34:47 -08001776 queue_index = skb_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001777
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001778 skb_set_queue_mapping(skb, queue_index);
1779 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001780}
1781
Dave Jonesd29f7492008-07-22 14:09:06 -07001782/**
1783 * dev_queue_xmit - transmit a buffer
1784 * @skb: buffer to transmit
1785 *
1786 * Queue a buffer for transmission to a network device. The caller must
1787 * have set the device and priority and built the buffer before calling
1788 * this function. The function can be called from an interrupt.
1789 *
1790 * A negative errno code is returned on a failure. A success does not
1791 * guarantee the frame will be transmitted as it may be dropped due
1792 * to congestion or traffic shaping.
1793 *
1794 * -----------------------------------------------------------------------------------
1795 * I notice this method can also return errors from the queue disciplines,
1796 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1797 * be positive.
1798 *
1799 * Regardless of the return value, the skb is consumed, so it is currently
1800 * difficult to retry a send to this method. (You can bump the ref count
1801 * before sending to hold a reference for retry if you are careful.)
1802 *
1803 * When calling this method, interrupts MUST be enabled. This is because
1804 * the BH enable code must have IRQs enabled so that it will not deadlock.
1805 * --BLG
1806 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807int dev_queue_xmit(struct sk_buff *skb)
1808{
1809 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001810 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 struct Qdisc *q;
1812 int rc = -ENOMEM;
1813
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001814 /* GSO will handle the following emulations directly. */
1815 if (netif_needs_gso(dev, skb))
1816 goto gso;
1817
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 if (skb_shinfo(skb)->frag_list &&
1819 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001820 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 goto out_kfree_skb;
1822
1823 /* Fragmented skb is linearized if device does not support SG,
1824	 * or if at least one of the fragments is in highmem and the device
1825 * does not support DMA from it.
1826 */
1827 if (skb_shinfo(skb)->nr_frags &&
1828 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001829 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 goto out_kfree_skb;
1831
1832 /* If packet is not checksummed and device does not support
1833 * checksumming for this protocol, complete checksumming here.
1834 */
Herbert Xu663ead32007-04-09 11:59:07 -07001835 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1836 skb_set_transport_header(skb, skb->csum_start -
1837 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001838 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1839 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001840 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001842gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001843 /* Disable soft irqs for various locks below. Also
1844 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001846 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847
David S. Millereae792b2008-07-15 03:03:33 -07001848 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001849 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001850
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851#ifdef CONFIG_NET_CLS_ACT
1852	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1853#endif
1854 if (q->enqueue) {
David S. Miller5fb66222008-08-02 20:02:43 -07001855 spinlock_t *root_lock = qdisc_lock(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
David S. Miller37437bb2008-07-16 02:15:04 -07001857 spin_lock(root_lock);
1858
David S. Millera9312ae2008-08-17 21:51:03 -07001859 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
David S. Miller96d20312008-08-17 23:37:16 -07001860 kfree_skb(skb);
David S. Millera9312ae2008-08-17 21:51:03 -07001861 rc = NET_XMIT_DROP;
David S. Miller96d20312008-08-17 23:37:16 -07001862 } else {
1863 rc = qdisc_enqueue_root(skb, q);
1864 qdisc_run(q);
David S. Millera9312ae2008-08-17 21:51:03 -07001865 }
David S. Miller37437bb2008-07-16 02:15:04 -07001866 spin_unlock(root_lock);
1867
David S. Miller37437bb2008-07-16 02:15:04 -07001868 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 }
1870
1871 /* The device has no queue. Common case for software devices:
1872	   loopback, all sorts of tunnels...
1873
Herbert Xu932ff272006-06-09 12:20:56 -07001874 Really, it is unlikely that netif_tx_lock protection is necessary
1875	   here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 counters.)
1877	   However, it is possible that they rely on the protection
1878	   we provide here.
1879
1880	   Check this and take the lock. It is not prone to deadlocks.
1881	   Or shoot the noqueue qdisc; that is even simpler 8)
1882 */
1883 if (dev->flags & IFF_UP) {
1884 int cpu = smp_processor_id(); /* ok because BHs are off */
1885
David S. Millerc773e842008-07-08 23:13:53 -07001886 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887
David S. Millerc773e842008-07-08 23:13:53 -07001888 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001890 if (!netif_tx_queue_stopped(txq)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 rc = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001892 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001893 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 goto out;
1895 }
1896 }
David S. Millerc773e842008-07-08 23:13:53 -07001897 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 if (net_ratelimit())
1899 printk(KERN_CRIT "Virtual device %s asks to "
1900 "queue packet!\n", dev->name);
1901 } else {
1902			/* Recursion detected! It is possible,
1903 * unfortunately */
1904 if (net_ratelimit())
1905 printk(KERN_CRIT "Dead loop on virtual device "
1906 "%s, fix it urgently!\n", dev->name);
1907 }
1908 }
1909
1910 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001911 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
1913out_kfree_skb:
1914 kfree_skb(skb);
1915 return rc;
1916out:
Herbert Xud4828d82006-06-22 02:28:18 -07001917 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 return rc;
1919}
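
/*
 * Sender sketch (assumptions flagged inline, not from this file): build
 * an Ethernet frame by hand and queue it for transmission. Note that
 * dev_queue_xmit() consumes the skb even on error.
 */
static int foo_send_frame(struct net_device *dev, const u8 *daddr,
			  const void *payload, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	memcpy(skb_put(skb, len), payload, len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);	/* assume an IPv4 payload */
	if (dev_hard_header(skb, dev, ETH_P_IP, daddr, dev->dev_addr,
			    skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return dev_queue_xmit(skb);
}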
1920
1921
1922/*=======================================================================
1923 Receiver routines
1924 =======================================================================*/
1925
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001926int netdev_max_backlog __read_mostly = 1000;
1927int netdev_budget __read_mostly = 300;
1928int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
1930DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1931
1932
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933/**
1934 * netif_rx - post buffer to the network code
1935 * @skb: buffer to post
1936 *
1937 * This function receives a packet from a device driver and queues it for
1938 * the upper (protocol) levels to process. It always succeeds. The buffer
1939 * may be dropped during processing for congestion control or by the
1940 * protocol layers.
1941 *
1942 * return values:
1943 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 * NET_RX_DROP (packet was dropped)
1945 *
1946 */
1947
1948int netif_rx(struct sk_buff *skb)
1949{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 struct softnet_data *queue;
1951 unsigned long flags;
1952
1953 /* if netpoll wants it, pretend we never saw it */
1954 if (netpoll_rx(skb))
1955 return NET_RX_DROP;
1956
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001957 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001958 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
1960 /*
1961	 * The code is arranged so that the path is shortest
1962	 * when the CPU is congested but still operating.
1963 */
1964 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 queue = &__get_cpu_var(softnet_data);
1966
1967 __get_cpu_var(netdev_rx_stat).total++;
1968 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1969 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001973 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 }
1975
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001976 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 goto enqueue;
1978 }
1979
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 __get_cpu_var(netdev_rx_stat).dropped++;
1981 local_irq_restore(flags);
1982
1983 kfree_skb(skb);
1984 return NET_RX_DROP;
1985}
1986
1987int netif_rx_ni(struct sk_buff *skb)
1988{
1989 int err;
1990
1991 preempt_disable();
1992 err = netif_rx(skb);
1993 if (local_softirq_pending())
1994 do_softirq();
1995 preempt_enable();
1996
1997 return err;
1998}
1999
2000EXPORT_SYMBOL(netif_rx_ni);
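
/*
 * Context rule of thumb (illustrative helper, foo_ is hypothetical):
 * interrupt handlers hand skbs in via netif_rx(); process-context
 * paths use netif_rx_ni() so a pending NET_RX softirq runs promptly.
 */
static void foo_deliver(struct sk_buff *skb, bool from_irq)
{
	if (from_irq)
		netif_rx(skb);
	else
		netif_rx_ni(skb);
}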
2001
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002static void net_tx_action(struct softirq_action *h)
2003{
2004 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2005
2006 if (sd->completion_queue) {
2007 struct sk_buff *clist;
2008
2009 local_irq_disable();
2010 clist = sd->completion_queue;
2011 sd->completion_queue = NULL;
2012 local_irq_enable();
2013
2014 while (clist) {
2015 struct sk_buff *skb = clist;
2016 clist = clist->next;
2017
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002018 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 __kfree_skb(skb);
2020 }
2021 }
2022
2023 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002024 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025
2026 local_irq_disable();
2027 head = sd->output_queue;
2028 sd->output_queue = NULL;
2029 local_irq_enable();
2030
2031 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002032 struct Qdisc *q = head;
2033 spinlock_t *root_lock;
2034
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 head = head->next_sched;
2036
David S. Miller5fb66222008-08-02 20:02:43 -07002037 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002038 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002039 smp_mb__before_clear_bit();
2040 clear_bit(__QDISC_STATE_SCHED,
2041 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002042 qdisc_run(q);
2043 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002045 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002046 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002047 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002048 } else {
2049 smp_mb__before_clear_bit();
2050 clear_bit(__QDISC_STATE_SCHED,
2051 &q->state);
2052 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 }
2054 }
2055 }
2056}
2057
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002058static inline int deliver_skb(struct sk_buff *skb,
2059 struct packet_type *pt_prev,
2060 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061{
2062 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002063 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064}
2065
2066#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Stephen Hemminger6229e362007-03-21 13:38:47 -07002067/* These hooks are defined here for ATM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068struct net_bridge;
2069struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
2070 unsigned char *addr);
Stephen Hemminger6229e362007-03-21 13:38:47 -07002071void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072
Stephen Hemminger6229e362007-03-21 13:38:47 -07002073/*
2074 * If the bridge module is loaded, call the bridging hook.
2075 * Returns NULL if the packet was consumed.
2076 */
2077struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2078 struct sk_buff *skb) __read_mostly;
2079static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2080 struct packet_type **pt_prev, int *ret,
2081 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082{
2083 struct net_bridge_port *port;
2084
Stephen Hemminger6229e362007-03-21 13:38:47 -07002085 if (skb->pkt_type == PACKET_LOOPBACK ||
2086 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2087 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088
2089 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002090 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002092 }
2093
Stephen Hemminger6229e362007-03-21 13:38:47 -07002094 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095}
2096#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002097#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098#endif
2099
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002100#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2101struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2102EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2103
2104static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2105 struct packet_type **pt_prev,
2106 int *ret,
2107 struct net_device *orig_dev)
2108{
2109 if (skb->dev->macvlan_port == NULL)
2110 return skb;
2111
2112 if (*pt_prev) {
2113 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2114 *pt_prev = NULL;
2115 }
2116 return macvlan_handle_frame_hook(skb);
2117}
2118#else
2119#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2120#endif
2121
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122#ifdef CONFIG_NET_CLS_ACT
2123/* TODO: Maybe we should just force sch_ingress to be compiled in
2124 * when CONFIG_NET_CLS_ACT is? Otherwise we pay some useless instructions,
2125 * a compare and 2 extra stores, right now if we don't have it on
2126 * but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002127 * NOTE: This doesn't stop any functionality; if you don't have
2128 * the ingress scheduler, you just can't add policies on ingress.
2129 *
2130 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002131static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002134 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002135 struct netdev_queue *rxq;
2136 int result = TC_ACT_OK;
2137 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002138
Herbert Xuf697c3e2007-10-14 00:38:47 -07002139 if (MAX_RED_LOOP < ttl++) {
2140 printk(KERN_WARNING
2141 "Redir loop detected Dropping packet (%d->%d)\n",
2142 skb->iif, dev->ifindex);
2143 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 }
2145
Herbert Xuf697c3e2007-10-14 00:38:47 -07002146 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2147 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2148
David S. Miller555353c2008-07-08 17:33:13 -07002149 rxq = &dev->rx_queue;
2150
David S. Miller83874002008-07-17 00:53:03 -07002151 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002152 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002153 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002154 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2155 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002156 spin_unlock(qdisc_lock(q));
2157 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002158
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 return result;
2160}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002161
2162static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2163 struct packet_type **pt_prev,
2164 int *ret, struct net_device *orig_dev)
2165{
David S. Miller8d50b532008-07-30 02:37:46 -07002166 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002167 goto out;
2168
2169 if (*pt_prev) {
2170 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2171 *pt_prev = NULL;
2172 } else {
2173 /* Huh? Why does turning on AF_PACKET affect this? */
2174 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2175 }
2176
2177 switch (ing_filter(skb)) {
2178 case TC_ACT_SHOT:
2179 case TC_ACT_STOLEN:
2180 kfree_skb(skb);
2181 return NULL;
2182 }
2183
2184out:
2185 skb->tc_verd = 0;
2186 return skb;
2187}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188#endif
2189
Patrick McHardybc1d0412008-07-14 22:49:30 -07002190/*
2191 * netif_nit_deliver - deliver received packets to network taps
2192 * @skb: buffer
2193 *
2194 * This function is used to deliver incoming packets to network
2195 * taps. It should be used when the normal netif_receive_skb path
2196 * is bypassed, for example because of VLAN acceleration.
2197 */
2198void netif_nit_deliver(struct sk_buff *skb)
2199{
2200 struct packet_type *ptype;
2201
2202 if (list_empty(&ptype_all))
2203 return;
2204
2205 skb_reset_network_header(skb);
2206 skb_reset_transport_header(skb);
2207 skb->mac_len = skb->network_header - skb->mac_header;
2208
2209 rcu_read_lock();
2210 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2211 if (!ptype->dev || ptype->dev == skb->dev)
2212 deliver_skb(skb, ptype, skb->dev);
2213 }
2214 rcu_read_unlock();
2215}
2216
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002217/**
2218 * netif_receive_skb - process receive buffer from network
2219 * @skb: buffer to process
2220 *
2221 * netif_receive_skb() is the main receive data processing function.
2222 * It always succeeds. The buffer may be dropped during processing
2223 * for congestion control or by the protocol layers.
2224 *
2225 * This function may only be called from softirq context and interrupts
2226 * should be enabled.
2227 *
2228 * Return values (usually ignored):
2229 * NET_RX_SUCCESS: no congestion
2230 * NET_RX_DROP: packet was dropped
2231 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232int netif_receive_skb(struct sk_buff *skb)
2233{
2234 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002235 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002236 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002238 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002240 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2241 return NET_RX_SUCCESS;
2242
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002244 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 return NET_RX_DROP;
2246
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002247 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002248 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249
Patrick McHardyc01003c2007-03-29 11:46:52 -07002250 if (!skb->iif)
2251 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002252
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002253 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002254 orig_dev = skb->dev;
2255 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002256 if (skb_bond_should_drop(skb))
2257 null_or_orig = orig_dev; /* deliver only exact match */
2258 else
2259 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002260 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002261
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 __get_cpu_var(netdev_rx_stat).total++;
2263
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002264 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002265 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002266 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
2268 pt_prev = NULL;
2269
2270 rcu_read_lock();
2271
2272#ifdef CONFIG_NET_CLS_ACT
2273 if (skb->tc_verd & TC_NCLS) {
2274 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2275 goto ncls;
2276 }
2277#endif
2278
2279 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002280 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2281 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002282 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002283 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 pt_prev = ptype;
2285 }
2286 }
2287
2288#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002289 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2290 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292ncls:
2293#endif
2294
Stephen Hemminger6229e362007-03-21 13:38:47 -07002295 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2296 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002298 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2299 if (!skb)
2300 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301
Herbert Xu9a279bc2009-02-04 16:55:27 -08002302 skb_orphan(skb);
2303
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002305 list_for_each_entry_rcu(ptype,
2306 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002308 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2309 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002310 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002311 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 pt_prev = ptype;
2313 }
2314 }
2315
2316 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002317 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 } else {
2319 kfree_skb(skb);
2320		/* Jamal, now you will not be able to escape explaining
2321		 * to me how you were going to use this. :-)
2322 */
2323 ret = NET_RX_DROP;
2324 }
2325
2326out:
2327 rcu_read_unlock();
2328 return ret;
2329}
2330
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002331/* Network device is going away, flush any packets still pending */
2332static void flush_backlog(void *arg)
2333{
2334 struct net_device *dev = arg;
2335 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2336 struct sk_buff *skb, *tmp;
2337
2338 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2339 if (skb->dev == dev) {
2340 __skb_unlink(skb, &queue->input_pkt_queue);
2341 kfree_skb(skb);
2342 }
2343}
2344
Herbert Xud565b0a2008-12-15 23:38:52 -08002345static int napi_gro_complete(struct sk_buff *skb)
2346{
2347 struct packet_type *ptype;
2348 __be16 type = skb->protocol;
2349 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2350 int err = -ENOENT;
2351
Herbert Xufc59f9a2009-04-14 15:11:06 -07002352 if (NAPI_GRO_CB(skb)->count == 1) {
2353 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002354 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002355 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002356
2357 rcu_read_lock();
2358 list_for_each_entry_rcu(ptype, head, list) {
2359 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2360 continue;
2361
2362 err = ptype->gro_complete(skb);
2363 break;
2364 }
2365 rcu_read_unlock();
2366
2367 if (err) {
2368 WARN_ON(&ptype->list == head);
2369 kfree_skb(skb);
2370 return NET_RX_SUCCESS;
2371 }
2372
2373out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002374 return netif_receive_skb(skb);
2375}
2376
2377void napi_gro_flush(struct napi_struct *napi)
2378{
2379 struct sk_buff *skb, *next;
2380
2381 for (skb = napi->gro_list; skb; skb = next) {
2382 next = skb->next;
2383 skb->next = NULL;
2384 napi_gro_complete(skb);
2385 }
2386
Herbert Xu4ae55442009-02-08 18:00:36 +00002387 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002388 napi->gro_list = NULL;
2389}
2390EXPORT_SYMBOL(napi_gro_flush);
2391
Herbert Xu96e93ea2009-01-06 10:49:34 -08002392int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002393{
2394 struct sk_buff **pp = NULL;
2395 struct packet_type *ptype;
2396 __be16 type = skb->protocol;
2397 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002398 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002399 int mac_len;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002400 int ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002401
2402 if (!(skb->dev->features & NETIF_F_GRO))
2403 goto normal;
2404
Herbert Xuf17f5c92009-01-14 14:36:12 -08002405 if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
2406 goto normal;
2407
Herbert Xud565b0a2008-12-15 23:38:52 -08002408 rcu_read_lock();
2409 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002410 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2411 continue;
2412
Herbert Xu86911732009-01-29 14:19:50 +00002413 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002414 mac_len = skb->network_header - skb->mac_header;
2415 skb->mac_len = mac_len;
2416 NAPI_GRO_CB(skb)->same_flow = 0;
2417 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002418 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002419
Herbert Xud565b0a2008-12-15 23:38:52 -08002420 pp = ptype->gro_receive(&napi->gro_list, skb);
2421 break;
2422 }
2423 rcu_read_unlock();
2424
2425 if (&ptype->list == head)
2426 goto normal;
2427
Herbert Xu0da2afd52008-12-26 14:57:42 -08002428 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002429 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002430
Herbert Xud565b0a2008-12-15 23:38:52 -08002431 if (pp) {
2432 struct sk_buff *nskb = *pp;
2433
2434 *pp = nskb->next;
2435 nskb->next = NULL;
2436 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002437 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002438 }
2439
Herbert Xu0da2afd52008-12-26 14:57:42 -08002440 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002441 goto ok;
2442
Herbert Xu4ae55442009-02-08 18:00:36 +00002443 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002444 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002445
Herbert Xu4ae55442009-02-08 18:00:36 +00002446 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002447 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002448 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002449 skb->next = napi->gro_list;
2450 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002451 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002452
Herbert Xuad0f9902009-02-01 01:24:55 -08002453pull:
Herbert Xucb189782009-05-26 18:50:31 +00002454 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2455 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2456
2457 BUG_ON(skb->end - skb->tail < grow);
2458
2459 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2460
2461 skb->tail += grow;
2462 skb->data_len -= grow;
2463
2464 skb_shinfo(skb)->frags[0].page_offset += grow;
2465 skb_shinfo(skb)->frags[0].size -= grow;
2466
2467 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2468 put_page(skb_shinfo(skb)->frags[0].page);
2469 memmove(skb_shinfo(skb)->frags,
2470 skb_shinfo(skb)->frags + 1,
2471 --skb_shinfo(skb)->nr_frags);
2472 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002473 }
2474
Herbert Xud565b0a2008-12-15 23:38:52 -08002475ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002476 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002477
2478normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002479 ret = GRO_NORMAL;
2480 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002481}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002482EXPORT_SYMBOL(dev_gro_receive);
2483
2484static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2485{
2486 struct sk_buff *p;
2487
Herbert Xud1c76af2009-03-16 10:50:02 -07002488 if (netpoll_rx_on(skb))
2489 return GRO_NORMAL;
2490
Herbert Xu96e93ea2009-01-06 10:49:34 -08002491 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002492 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2493 && !compare_ether_header(skb_mac_header(p),
2494 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002495 NAPI_GRO_CB(p)->flush = 0;
2496 }
2497
2498 return dev_gro_receive(napi, skb);
2499}
Herbert Xu5d38a072009-01-04 16:13:40 -08002500
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002501int napi_skb_finish(int ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002502{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002503 int err = NET_RX_SUCCESS;
2504
2505 switch (ret) {
2506 case GRO_NORMAL:
Herbert Xu5d38a072009-01-04 16:13:40 -08002507 return netif_receive_skb(skb);
2508
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002509 case GRO_DROP:
2510 err = NET_RX_DROP;
2511 /* fall through */
2512
2513 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002514 kfree_skb(skb);
2515 break;
2516 }
2517
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002518 return err;
2519}
2520EXPORT_SYMBOL(napi_skb_finish);
2521
Herbert Xu78a478d2009-05-26 18:50:21 +00002522void skb_gro_reset_offset(struct sk_buff *skb)
2523{
2524 NAPI_GRO_CB(skb)->data_offset = 0;
2525 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002526 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002527
Herbert Xu78d3fd02009-05-26 18:50:23 +00002528 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002529 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002530 NAPI_GRO_CB(skb)->frag0 =
2531 page_address(skb_shinfo(skb)->frags[0].page) +
2532 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002533 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2534 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002535}
2536EXPORT_SYMBOL(skb_gro_reset_offset);
2537
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002538int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2539{
Herbert Xu86911732009-01-29 14:19:50 +00002540 skb_gro_reset_offset(skb);
2541
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002542 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002543}
2544EXPORT_SYMBOL(napi_gro_receive);
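
/*
 * RX-poll sketch (hypothetical foo_ driver; foo_next_rx_skb() is an
 * assumed helper): each completed descriptor becomes an skb that is fed
 * through GRO, and the core flushes held skbs at napi_complete() time.
 */
static int foo_clean_rx(struct napi_struct *napi, int budget)
{
	int done = 0;
	struct sk_buff *skb;

	while (done < budget && (skb = foo_next_rx_skb(napi)) != NULL) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);
		done++;
	}
	return done;
}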
2545
Herbert Xu96e93ea2009-01-06 10:49:34 -08002546void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2547{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002548 __skb_pull(skb, skb_headlen(skb));
2549 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2550
2551 napi->skb = skb;
2552}
2553EXPORT_SYMBOL(napi_reuse_skb);
2554
Herbert Xu76620aa2009-04-16 02:02:07 -07002555struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002556{
2557 struct net_device *dev = napi->dev;
2558 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002559
2560 if (!skb) {
2561 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2562 if (!skb)
2563 goto out;
2564
2565 skb_reserve(skb, NET_IP_ALIGN);
Herbert Xu76620aa2009-04-16 02:02:07 -07002566
2567 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002568 }
2569
Herbert Xu96e93ea2009-01-06 10:49:34 -08002570out:
2571 return skb;
2572}
Herbert Xu76620aa2009-04-16 02:02:07 -07002573EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002574
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002575int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2576{
2577 int err = NET_RX_SUCCESS;
2578
2579 switch (ret) {
2580 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002581 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002582 skb->protocol = eth_type_trans(skb, napi->dev);
2583
2584 if (ret == GRO_NORMAL)
2585 return netif_receive_skb(skb);
2586
2587 skb_gro_pull(skb, -ETH_HLEN);
2588 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002589
2590 case GRO_DROP:
2591 err = NET_RX_DROP;
2592 /* fall through */
2593
2594 case GRO_MERGED_FREE:
2595 napi_reuse_skb(napi, skb);
2596 break;
2597 }
2598
2599 return err;
2600}
2601EXPORT_SYMBOL(napi_frags_finish);
2602
Herbert Xu76620aa2009-04-16 02:02:07 -07002603struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002604{
Herbert Xu76620aa2009-04-16 02:02:07 -07002605 struct sk_buff *skb = napi->skb;
2606 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002607 unsigned int hlen;
2608 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002609
2610 napi->skb = NULL;
2611
2612 skb_reset_mac_header(skb);
2613 skb_gro_reset_offset(skb);
2614
Herbert Xua5b1cf22009-05-26 18:50:28 +00002615 off = skb_gro_offset(skb);
2616 hlen = off + sizeof(*eth);
2617 eth = skb_gro_header_fast(skb, off);
2618 if (skb_gro_header_hard(skb, hlen)) {
2619 eth = skb_gro_header_slow(skb, hlen, off);
2620 if (unlikely(!eth)) {
2621 napi_reuse_skb(napi, skb);
2622 skb = NULL;
2623 goto out;
2624 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002625 }
2626
2627 skb_gro_pull(skb, sizeof(*eth));
2628
2629 /*
2630 * This works because the only protocols we care about don't require
2631 * special handling. We'll fix it up properly at the end.
2632 */
2633 skb->protocol = eth->h_proto;
2634
2635out:
2636 return skb;
2637}
2638EXPORT_SYMBOL(napi_frags_skb);
2639
2640int napi_gro_frags(struct napi_struct *napi)
2641{
2642 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002643
2644 if (!skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002645 return NET_RX_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002646
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002647 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002648}
2649EXPORT_SYMBOL(napi_gro_frags);
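
/*
 * Header-less GRO sketch (hypothetical driver code): borrow napi->skb
 * via napi_get_frags(), attach the received page as frag 0 and let
 * napi_frags_skb() pull the Ethernet header out of it.
 */
static int foo_rx_page(struct napi_struct *napi, struct page *page,
		       unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return NET_RX_DROP;

	skb_fill_page_desc(skb, 0, page, 0, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	return napi_gro_frags(napi);
}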
2650
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002651static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652{
2653 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2655 unsigned long start_time = jiffies;
2656
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002657 napi->weight = weight_p;
2658 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660
2661 local_irq_disable();
2662 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002663 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002664 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002665 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002666 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002667 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668 local_irq_enable();
2669
Herbert Xu8f1ead22009-03-26 00:59:10 -07002670 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002671 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002673 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674}
2675
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002676/**
2677 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002678 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002679 *
2680 * The entry's receive function will be scheduled to run
2681 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002682void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002683{
2684 unsigned long flags;
2685
2686 local_irq_save(flags);
2687 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2688 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2689 local_irq_restore(flags);
2690}
2691EXPORT_SYMBOL(__napi_schedule);
2692
Herbert Xud565b0a2008-12-15 23:38:52 -08002693void __napi_complete(struct napi_struct *n)
2694{
2695 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2696 BUG_ON(n->gro_list);
2697
2698 list_del(&n->poll_list);
2699 smp_mb__before_clear_bit();
2700 clear_bit(NAPI_STATE_SCHED, &n->state);
2701}
2702EXPORT_SYMBOL(__napi_complete);
2703
2704void napi_complete(struct napi_struct *n)
2705{
2706 unsigned long flags;
2707
2708 /*
2709	 * don't let NAPI dequeue from the CPU poll list
2710	 * just in case it's running on a different CPU
2711 */
2712 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2713 return;
2714
2715 napi_gro_flush(n);
2716 local_irq_save(flags);
2717 __napi_complete(n);
2718 local_irq_restore(flags);
2719}
2720EXPORT_SYMBOL(napi_complete);
2721
2722void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2723 int (*poll)(struct napi_struct *, int), int weight)
2724{
2725 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002726 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002727 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002728 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002729 napi->poll = poll;
2730 napi->weight = weight;
2731 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002732 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002733#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002734 spin_lock_init(&napi->poll_lock);
2735 napi->poll_owner = -1;
2736#endif
2737 set_bit(NAPI_STATE_SCHED, &napi->state);
2738}
2739EXPORT_SYMBOL(netif_napi_add);
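
/*
 * Lifecycle sketch (foo_* types and helpers are hypothetical): the
 * driver registers its context once, schedules it from the RX
 * interrupt, and completes it from ->poll() when under budget.
 * foo_clean_rx() is the GRO poll loop sketched earlier.
 */
struct foo_priv {				/* hypothetical private state */
	struct napi_struct napi;
	/* ... */
};

static irqreturn_t foo_rx_irq(int irq, void *data)
{
	struct foo_priv *priv = data;

	foo_mask_rx_irq(priv);			/* hypothetical helper */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work = foo_clean_rx(napi, budget);

	if (work < budget) {
		napi_complete(napi);
		foo_unmask_rx_irq(priv);	/* hypothetical helper */
	}
	return work;
}

static void foo_setup_napi(struct net_device *netdev, struct foo_priv *priv)
{
	netif_napi_add(netdev, &priv->napi, foo_poll, 64);
}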
2740
2741void netif_napi_del(struct napi_struct *napi)
2742{
2743 struct sk_buff *skb, *next;
2744
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002745 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002746 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002747
2748 for (skb = napi->gro_list; skb; skb = next) {
2749 next = skb->next;
2750 skb->next = NULL;
2751 kfree_skb(skb);
2752 }
2753
2754 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002755 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002756}
2757EXPORT_SYMBOL(netif_napi_del);
2758
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002759
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760static void net_rx_action(struct softirq_action *h)
2761{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002762 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002763 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002764 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002765 void *have;
2766
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 local_irq_disable();
2768
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002769 while (!list_empty(list)) {
2770 struct napi_struct *n;
2771 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002773 /* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002774 * Allow this to run for 2 jiffies, which allows
2775 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002776 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002777 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 goto softnet_break;
2779
2780 local_irq_enable();
2781
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002782 /* Even though interrupts have been re-enabled, this
2783 * access is safe because interrupts can only add new
2784 * entries to the tail of this list, and only ->poll()
2785 * calls can remove this head entry from the list.
2786 */
2787 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002789 have = netpoll_poll_lock(n);
2790
2791 weight = n->weight;
2792
David S. Miller0a7606c2007-10-29 21:28:47 -07002793 /* This NAPI_STATE_SCHED test is for avoiding a race
2794 * with netpoll's poll_napi(). Only the entity which
2795 * obtains the lock and sees NAPI_STATE_SCHED set will
2796 * actually make the ->poll() call. Therefore we avoid
2797 * accidentally calling ->poll() when NAPI is not scheduled.
2798 */
2799 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002800 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002801 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002802 trace_napi_poll(n);
2803 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002804
2805 WARN_ON_ONCE(work > weight);
2806
2807 budget -= work;
2808
2809 local_irq_disable();
2810
2811 /* Drivers must not modify the NAPI state if they
2812 * consume the entire weight. In such cases this code
2813 * still "owns" the NAPI instance and therefore can
2814 * move the instance around on the list at-will.
2815 */
David S. Millerfed17f32008-01-07 21:00:40 -08002816 if (unlikely(work == weight)) {
2817 if (unlikely(napi_disable_pending(n)))
2818 __napi_complete(n);
2819 else
2820 list_move_tail(&n->poll_list, list);
2821 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002822
2823 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824 }
2825out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002826 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002827
Chris Leechdb217332006-06-17 21:24:58 -07002828#ifdef CONFIG_NET_DMA
2829 /*
2830 * There may not be any more sk_buffs coming right now, so push
2831 * any pending DMA copies to hardware
2832 */
Dan Williams2ba05622009-01-06 11:38:14 -07002833 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002834#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002835
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 return;
2837
2838softnet_break:
2839 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2840 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2841 goto out;
2842}
2843
2844static gifconf_func_t * gifconf_list [NPROTO];
2845
2846/**
2847 * register_gifconf - register a SIOCGIF handler
2848 * @family: Address family
2849 * @gifconf: Function handler
2850 *
2851 * Register protocol dependent address dumping routines. The handler
2852 * that is passed must not be freed or reused until it has been replaced
2853 * by another handler.
2854 */
2855int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2856{
2857 if (family >= NPROTO)
2858 return -EINVAL;
2859 gifconf_list[family] = gifconf;
2860 return 0;
2861}
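/*
 * Usage sketch (illustrative): an address family registers its dumper
 * at init time, e.g. IPv4 does the equivalent of
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * where the handler fills one struct ifreq per address into the user
 * buffer, or just returns the space needed when the buffer is NULL.
 */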
2862
2863
2864/*
2865 * Map an interface index to its name (SIOCGIFNAME)
2866 */
2867
2868/*
2869 * We need this ioctl for efficient implementation of the
2870 * if_indextoname() function required by the IPv6 API. Without
2871 * it, we would have to search all the interfaces to find a
2872 * match. --pb
2873 */
2874
Eric W. Biederman881d9662007-09-17 11:56:21 -07002875static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876{
2877 struct net_device *dev;
2878 struct ifreq ifr;
2879
2880 /*
2881 * Fetch the caller's info block.
2882 */
2883
2884 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2885 return -EFAULT;
2886
2887 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002888 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889 if (!dev) {
2890 read_unlock(&dev_base_lock);
2891 return -ENODEV;
2892 }
2893
2894 strcpy(ifr.ifr_name, dev->name);
2895 read_unlock(&dev_base_lock);
2896
2897 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2898 return -EFAULT;
2899 return 0;
2900}
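/*
 * Userspace view (illustrative, error handling omitted): glibc's
 * if_indextoname() boils down to this ioctl on any socket:
 *
 *	struct ifreq ifr;
 *
 *	ifr.ifr_ifindex = index;
 *	if (ioctl(sockfd, SIOCGIFNAME, &ifr) == 0)
 *		printf("%s\n", ifr.ifr_name);
 */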
2901
2902/*
2903 * Perform a SIOCGIFCONF call. This structure will change
2904 * size eventually, and there is nothing I can do about it.
2905 * Thus we will need a 'compatibility mode'.
2906 */
2907
Eric W. Biederman881d9662007-09-17 11:56:21 -07002908static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909{
2910 struct ifconf ifc;
2911 struct net_device *dev;
2912 char __user *pos;
2913 int len;
2914 int total;
2915 int i;
2916
2917 /*
2918 * Fetch the caller's info block.
2919 */
2920
2921 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2922 return -EFAULT;
2923
2924 pos = ifc.ifc_buf;
2925 len = ifc.ifc_len;
2926
2927 /*
2928 * Loop over the interfaces, and write an info block for each.
2929 */
2930
2931 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002932 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 for (i = 0; i < NPROTO; i++) {
2934 if (gifconf_list[i]) {
2935 int done;
2936 if (!pos)
2937 done = gifconf_list[i](dev, NULL, 0);
2938 else
2939 done = gifconf_list[i](dev, pos + total,
2940 len - total);
2941 if (done < 0)
2942 return -EFAULT;
2943 total += done;
2944 }
2945 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002946 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947
2948 /*
2949 * All done. Write the updated control block back to the caller.
2950 */
2951 ifc.ifc_len = total;
2952
2953 /*
2954 * Both BSD and Solaris return 0 here, so we do too.
2955 */
2956 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2957}
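/*
 * Userspace view (illustrative, error handling omitted): the usual
 * two-pass protocol against the handler above, first sizing with a
 * NULL buffer, then filling it:
 *
 *	struct ifconf ifc;
 *
 *	ifc.ifc_buf = NULL;
 *	ioctl(sockfd, SIOCGIFCONF, &ifc);	pass 1: learn ifc_len
 *	ifc.ifc_buf = malloc(ifc.ifc_len);
 *	ioctl(sockfd, SIOCGIFCONF, &ifc);	pass 2: fill the buffer
 *
 * Each record returned in the buffer is a struct ifreq.
 */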
2958
2959#ifdef CONFIG_PROC_FS
2960/*
2961 * This is invoked by the /proc filesystem handler to display a device
2962 * in detail.
2963 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002965 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966{
Denis V. Luneve372c412007-11-19 22:31:54 -08002967 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002968 loff_t off;
2969 struct net_device *dev;
2970
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002972 if (!*pos)
2973 return SEQ_START_TOKEN;
2974
2975 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002976 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002977 if (off++ == *pos)
2978 return dev;
2979
2980 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002981}
2982
2983void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2984{
Denis V. Luneve372c412007-11-19 22:31:54 -08002985 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002987 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002988 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002989}
2990
2991void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002992 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993{
2994 read_unlock(&dev_base_lock);
2995}
2996
2997static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2998{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08002999 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000
Rusty Russell5a1b5892007-04-28 21:04:03 -07003001 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3002 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3003 dev->name, stats->rx_bytes, stats->rx_packets,
3004 stats->rx_errors,
3005 stats->rx_dropped + stats->rx_missed_errors,
3006 stats->rx_fifo_errors,
3007 stats->rx_length_errors + stats->rx_over_errors +
3008 stats->rx_crc_errors + stats->rx_frame_errors,
3009 stats->rx_compressed, stats->multicast,
3010 stats->tx_bytes, stats->tx_packets,
3011 stats->tx_errors, stats->tx_dropped,
3012 stats->tx_fifo_errors, stats->collisions,
3013 stats->tx_carrier_errors +
3014 stats->tx_aborted_errors +
3015 stats->tx_window_errors +
3016 stats->tx_heartbeat_errors,
3017 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018}
3019
3020/*
3021 * Called from the PROCfs module. This now uses the new arbitrarily sized
3022 * /proc/net interface to create /proc/net/dev
3023 */
3024static int dev_seq_show(struct seq_file *seq, void *v)
3025{
3026 if (v == SEQ_START_TOKEN)
3027 seq_puts(seq, "Inter-| Receive "
3028 " | Transmit\n"
3029 " face |bytes packets errs drop fifo frame "
3030 "compressed multicast|bytes packets errs "
3031 "drop fifo colls carrier compressed\n");
3032 else
3033 dev_seq_printf_stats(seq, v);
3034 return 0;
3035}
3036
3037static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3038{
3039 struct netif_rx_stats *rc = NULL;
3040
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003041 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003042 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043 rc = &per_cpu(netdev_rx_stat, *pos);
3044 break;
3045 } else
3046 ++*pos;
3047 return rc;
3048}
3049
3050static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3051{
3052 return softnet_get_online(pos);
3053}
3054
3055static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3056{
3057 ++*pos;
3058 return softnet_get_online(pos);
3059}
3060
3061static void softnet_seq_stop(struct seq_file *seq, void *v)
3062{
3063}
3064
3065static int softnet_seq_show(struct seq_file *seq, void *v)
3066{
3067 struct netif_rx_stats *s = v;
3068
3069 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003070 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003071 0, 0, 0, 0, /* was fastroute */
3072 s->cpu_collision );
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073 return 0;
3074}
3075
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003076static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003077 .start = dev_seq_start,
3078 .next = dev_seq_next,
3079 .stop = dev_seq_stop,
3080 .show = dev_seq_show,
3081};
3082
3083static int dev_seq_open(struct inode *inode, struct file *file)
3084{
Denis V. Luneve372c412007-11-19 22:31:54 -08003085 return seq_open_net(inode, file, &dev_seq_ops,
3086 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003087}
3088
Arjan van de Ven9a321442007-02-12 00:55:35 -08003089static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003090 .owner = THIS_MODULE,
3091 .open = dev_seq_open,
3092 .read = seq_read,
3093 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003094 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095};
3096
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003097static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098 .start = softnet_seq_start,
3099 .next = softnet_seq_next,
3100 .stop = softnet_seq_stop,
3101 .show = softnet_seq_show,
3102};
3103
3104static int softnet_seq_open(struct inode *inode, struct file *file)
3105{
3106 return seq_open(file, &softnet_seq_ops);
3107}
3108
Arjan van de Ven9a321442007-02-12 00:55:35 -08003109static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110 .owner = THIS_MODULE,
3111 .open = softnet_seq_open,
3112 .read = seq_read,
3113 .llseek = seq_lseek,
3114 .release = seq_release,
3115};
3116
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003117static void *ptype_get_idx(loff_t pos)
3118{
3119 struct packet_type *pt = NULL;
3120 loff_t i = 0;
3121 int t;
3122
3123 list_for_each_entry_rcu(pt, &ptype_all, list) {
3124 if (i == pos)
3125 return pt;
3126 ++i;
3127 }
3128
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003129 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003130 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3131 if (i == pos)
3132 return pt;
3133 ++i;
3134 }
3135 }
3136 return NULL;
3137}
3138
3139static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003140 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003141{
3142 rcu_read_lock();
3143 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3144}
3145
3146static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3147{
3148 struct packet_type *pt;
3149 struct list_head *nxt;
3150 int hash;
3151
3152 ++*pos;
3153 if (v == SEQ_START_TOKEN)
3154 return ptype_get_idx(0);
3155
3156 pt = v;
3157 nxt = pt->list.next;
3158 if (pt->type == htons(ETH_P_ALL)) {
3159 if (nxt != &ptype_all)
3160 goto found;
3161 hash = 0;
3162 nxt = ptype_base[0].next;
3163 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003164 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003165
3166 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003167 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003168 return NULL;
3169 nxt = ptype_base[hash].next;
3170 }
3171found:
3172 return list_entry(nxt, struct packet_type, list);
3173}
3174
3175static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003176 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003177{
3178 rcu_read_unlock();
3179}
3180
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003181static int ptype_seq_show(struct seq_file *seq, void *v)
3182{
3183 struct packet_type *pt = v;
3184
3185 if (v == SEQ_START_TOKEN)
3186 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003187 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003188 if (pt->type == htons(ETH_P_ALL))
3189 seq_puts(seq, "ALL ");
3190 else
3191 seq_printf(seq, "%04x", ntohs(pt->type));
3192
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003193 seq_printf(seq, " %-8s %pF\n",
3194 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003195 }
3196
3197 return 0;
3198}
3199
3200static const struct seq_operations ptype_seq_ops = {
3201 .start = ptype_seq_start,
3202 .next = ptype_seq_next,
3203 .stop = ptype_seq_stop,
3204 .show = ptype_seq_show,
3205};
3206
3207static int ptype_seq_open(struct inode *inode, struct file *file)
3208{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003209 return seq_open_net(inode, file, &ptype_seq_ops,
3210 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003211}
3212
3213static const struct file_operations ptype_seq_fops = {
3214 .owner = THIS_MODULE,
3215 .open = ptype_seq_open,
3216 .read = seq_read,
3217 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003218 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003219};
3220
3221
Pavel Emelyanov46650792007-10-08 20:38:39 -07003222static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003223{
3224 int rc = -ENOMEM;
3225
Eric W. Biederman881d9662007-09-17 11:56:21 -07003226 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003228 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003230 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003231 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003232
Eric W. Biederman881d9662007-09-17 11:56:21 -07003233 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003234 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235 rc = 0;
3236out:
3237 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003238out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003239 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003241 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003243 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003244 goto out;
3245}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003246
Pavel Emelyanov46650792007-10-08 20:38:39 -07003247static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003248{
3249 wext_proc_exit(net);
3250
3251 proc_net_remove(net, "ptype");
3252 proc_net_remove(net, "softnet_stat");
3253 proc_net_remove(net, "dev");
3254}
3255
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003256static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003257 .init = dev_proc_net_init,
3258 .exit = dev_proc_net_exit,
3259};
3260
3261static int __init dev_proc_init(void)
3262{
3263 return register_pernet_subsys(&dev_proc_ops);
3264}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265#else
3266#define dev_proc_init() 0
3267#endif /* CONFIG_PROC_FS */
3268
3269
3270/**
3271 * netdev_set_master - set up master/slave pair
3272 * @slave: slave device
3273 * @master: new master device
3274 *
3275 * Changes the master device of the slave. Pass %NULL to break the
3276 * bonding. The caller must hold the RTNL semaphore. On a failure
3277 * a negative errno code is returned. On success the reference counts
3278 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3279 * function returns zero.
3280 */
3281int netdev_set_master(struct net_device *slave, struct net_device *master)
3282{
3283 struct net_device *old = slave->master;
3284
3285 ASSERT_RTNL();
3286
3287 if (master) {
3288 if (old)
3289 return -EBUSY;
3290 dev_hold(master);
3291 }
3292
3293 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003294
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 synchronize_net();
3296
3297 if (old)
3298 dev_put(old);
3299
3300 if (master)
3301 slave->flags |= IFF_SLAVE;
3302 else
3303 slave->flags &= ~IFF_SLAVE;
3304
3305 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3306 return 0;
3307}
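/*
 * Usage sketch (illustrative): the bonding driver enslaves and
 * releases devices with this helper while holding the RTNL:
 *
 *	res = netdev_set_master(slave_dev, bond_dev);	enslave
 *	...
 *	netdev_set_master(slave_dev, NULL);		break the bond
 *
 * (bond_enslave()/bond_release() wrap these calls in the real driver.)
 */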
3308
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003309static void dev_change_rx_flags(struct net_device *dev, int flags)
3310{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003311 const struct net_device_ops *ops = dev->netdev_ops;
3312
3313 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3314 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003315}
3316
Wang Chendad9b332008-06-18 01:48:28 -07003317static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003318{
3319 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003320 uid_t uid;
3321 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003322
Patrick McHardy24023452007-07-14 18:51:31 -07003323 ASSERT_RTNL();
3324
Wang Chendad9b332008-06-18 01:48:28 -07003325 dev->flags |= IFF_PROMISC;
3326 dev->promiscuity += inc;
3327 if (dev->promiscuity == 0) {
3328 /*
3329 * Avoid overflow.
3330 * If inc causes an overflow, leave promiscuity untouched and return an error.
3331 */
3332 if (inc < 0)
3333 dev->flags &= ~IFF_PROMISC;
3334 else {
3335 dev->promiscuity -= inc;
3336 printk(KERN_WARNING "%s: promiscuity counter overflowed; "
3337 "promiscuity left unchanged, the promiscuity feature "
3338 "of the device might be broken.\n", dev->name);
3339 return -EOVERFLOW;
3340 }
3341 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003342 if (dev->flags != old_flags) {
3343 printk(KERN_INFO "device %s %s promiscuous mode\n",
3344 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3345 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003346 if (audit_enabled) {
3347 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003348 audit_log(current->audit_context, GFP_ATOMIC,
3349 AUDIT_ANOM_PROMISCUOUS,
3350 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3351 dev->name, (dev->flags & IFF_PROMISC),
3352 (old_flags & IFF_PROMISC),
3353 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003354 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003355 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003356 }
Patrick McHardy24023452007-07-14 18:51:31 -07003357
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003358 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003359 }
Wang Chendad9b332008-06-18 01:48:28 -07003360 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003361}
3362
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363/**
3364 * dev_set_promiscuity - update promiscuity count on a device
3365 * @dev: device
3366 * @inc: modifier
3367 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003368 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369 * remains above zero the interface remains promiscuous. Once it hits zero
3370 * the device reverts back to normal filtering operation. A negative inc
3371 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003372 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 */
Wang Chendad9b332008-06-18 01:48:28 -07003374int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375{
3376 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003377 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003378
Wang Chendad9b332008-06-18 01:48:28 -07003379 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003380 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003381 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003382 if (dev->flags != old_flags)
3383 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003384 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385}
3386
3387/**
3388 * dev_set_allmulti - update allmulti count on a device
3389 * @dev: device
3390 * @inc: modifier
3391 *
3392 * Add or remove reception of all multicast frames to a device. While the
3393 * count in the device remains above zero the interface remains listening
3394 * to all multicasts. Once it hits zero the device reverts back to normal
3395 * filtering operation. A negative @inc value is used to drop the counter
3396 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003397 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 */
3399
Wang Chendad9b332008-06-18 01:48:28 -07003400int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003401{
3402 unsigned short old_flags = dev->flags;
3403
Patrick McHardy24023452007-07-14 18:51:31 -07003404 ASSERT_RTNL();
3405
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003407 dev->allmulti += inc;
3408 if (dev->allmulti == 0) {
3409 /*
3410 * Avoid overflow.
3411 * If inc causes overflow, untouch allmulti and return error.
3412 */
3413 if (inc < 0)
3414 dev->flags &= ~IFF_ALLMULTI;
3415 else {
3416 dev->allmulti -= inc;
3417 printk(KERN_WARNING "%s: allmulti counter overflowed; "
3418 "allmulti left unchanged, the allmulti feature of "
3419 "the device might be broken.\n", dev->name);
3420 return -EOVERFLOW;
3421 }
3422 }
Patrick McHardy24023452007-07-14 18:51:31 -07003423 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003424 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003425 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003426 }
Wang Chendad9b332008-06-18 01:48:28 -07003427 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003428}
3429
3430/*
3431 * Upload unicast and multicast address lists to device and
3432 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003433 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003434 * are present.
3435 */
3436void __dev_set_rx_mode(struct net_device *dev)
3437{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003438 const struct net_device_ops *ops = dev->netdev_ops;
3439
Patrick McHardy4417da62007-06-27 01:28:10 -07003440 /* dev_open will call this function so the list will stay sane. */
3441 if (!(dev->flags&IFF_UP))
3442 return;
3443
3444 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003445 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003446
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003447 if (ops->ndo_set_rx_mode)
3448 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003449 else {
3450 /* Unicast addresses changes may only happen under the rtnl,
3451 * therefore calling __dev_set_promiscuity here is safe.
3452 */
3453 if (dev->uc_count > 0 && !dev->uc_promisc) {
3454 __dev_set_promiscuity(dev, 1);
3455 dev->uc_promisc = 1;
3456 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3457 __dev_set_promiscuity(dev, -1);
3458 dev->uc_promisc = 0;
3459 }
3460
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003461 if (ops->ndo_set_multicast_list)
3462 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003463 }
3464}
3465
3466void dev_set_rx_mode(struct net_device *dev)
3467{
David S. Millerb9e40852008-07-15 00:15:08 -07003468 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003469 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003470 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471}
3472
Jiri Pirkof001fde2009-05-05 02:48:28 +00003473/* hw addresses list handling functions */
3474
Jiri Pirkoccffad252009-05-22 23:22:17 +00003475static int __hw_addr_add(struct list_head *list, int *delta,
3476 unsigned char *addr, int addr_len,
3477 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003478{
3479 struct netdev_hw_addr *ha;
3480 int alloc_size;
3481
3482 if (addr_len > MAX_ADDR_LEN)
3483 return -EINVAL;
3484
Jiri Pirkoccffad252009-05-22 23:22:17 +00003485 list_for_each_entry(ha, list, list) {
3486 if (!memcmp(ha->addr, addr, addr_len) &&
3487 ha->type == addr_type) {
3488 ha->refcount++;
3489 return 0;
3490 }
3491 }
3492
3493
Jiri Pirkof001fde2009-05-05 02:48:28 +00003494 alloc_size = sizeof(*ha);
3495 if (alloc_size < L1_CACHE_BYTES)
3496 alloc_size = L1_CACHE_BYTES;
3497 ha = kmalloc(alloc_size, GFP_ATOMIC);
3498 if (!ha)
3499 return -ENOMEM;
3500 memcpy(ha->addr, addr, addr_len);
3501 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003502 ha->refcount = 1;
3503 ha->synced = false;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003504 list_add_tail_rcu(&ha->list, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003505 if (delta)
3506 (*delta)++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003507 return 0;
3508}
3509
3510static void ha_rcu_free(struct rcu_head *head)
3511{
3512 struct netdev_hw_addr *ha;
3513
3514 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3515 kfree(ha);
3516}
3517
Jiri Pirkoccffad252009-05-22 23:22:17 +00003518static int __hw_addr_del(struct list_head *list, int *delta,
3519 unsigned char *addr, int addr_len,
3520 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003521{
3522 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003523
3524 list_for_each_entry(ha, list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003525 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003526 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003527 if (--ha->refcount)
3528 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003529 list_del_rcu(&ha->list);
3530 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003531 if (delta)
3532 (*delta)--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003533 return 0;
3534 }
3535 }
3536 return -ENOENT;
3537}
3538
Jiri Pirkoccffad252009-05-22 23:22:17 +00003539static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta,
3540 struct list_head *from_list, int addr_len,
3541 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003542{
3543 int err;
3544 struct netdev_hw_addr *ha, *ha2;
3545 unsigned char type;
3546
3547 list_for_each_entry(ha, from_list, list) {
3548 type = addr_type ? addr_type : ha->type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003549 err = __hw_addr_add(to_list, to_delta, ha->addr,
3550 addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003551 if (err)
3552 goto unroll;
3553 }
3554 return 0;
3555
3556unroll:
3557 list_for_each_entry(ha2, from_list, list) {
3558 if (ha2 == ha)
3559 break;
3560 type = addr_type ? addr_type : ha2->type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003561 __hw_addr_del(to_list, to_delta, ha2->addr,
3562 addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003563 }
3564 return err;
3565}
3566
Jiri Pirkoccffad252009-05-22 23:22:17 +00003567static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta,
3568 struct list_head *from_list, int addr_len,
3569 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003570{
3571 struct netdev_hw_addr *ha;
3572 unsigned char type;
3573
3574 list_for_each_entry(ha, from_list, list) {
3575 type = addr_type ? addr_type : ha->type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003576 __hw_addr_del(to_list, to_delta, ha->addr,
3577 addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003578 }
3579}
3580
Jiri Pirkoccffad252009-05-22 23:22:17 +00003581static int __hw_addr_sync(struct list_head *to_list, int *to_delta,
3582 struct list_head *from_list, int *from_delta,
3583 int addr_len)
3584{
3585 int err = 0;
3586 struct netdev_hw_addr *ha, *tmp;
3587
3588 list_for_each_entry_safe(ha, tmp, from_list, list) {
3589 if (!ha->synced) {
3590 err = __hw_addr_add(to_list, to_delta, ha->addr,
3591 addr_len, ha->type);
3592 if (err)
3593 break;
3594 ha->synced = true;
3595 ha->refcount++;
3596 } else if (ha->refcount == 1) {
3597 __hw_addr_del(to_list, to_delta, ha->addr,
3598 addr_len, ha->type);
3599 __hw_addr_del(from_list, from_delta, ha->addr,
3600 addr_len, ha->type);
3601 }
3602 }
3603 return err;
3604}
3605
3606static void __hw_addr_unsync(struct list_head *to_list, int *to_delta,
3607 struct list_head *from_list, int *from_delta,
3608 int addr_len)
3609{
3610 struct netdev_hw_addr *ha, *tmp;
3611
3612 list_for_each_entry_safe(ha, tmp, from_list, list) {
3613 if (ha->synced) {
3614 __hw_addr_del(to_list, to_delta, ha->addr,
3615 addr_len, ha->type);
3616 ha->synced = false;
3617 __hw_addr_del(from_list, from_delta, ha->addr,
3618 addr_len, ha->type);
3619 }
3620 }
3621}
3622
3623
Jiri Pirkof001fde2009-05-05 02:48:28 +00003624static void __hw_addr_flush(struct list_head *list)
3625{
3626 struct netdev_hw_addr *ha, *tmp;
3627
3628 list_for_each_entry_safe(ha, tmp, list, list) {
3629 list_del_rcu(&ha->list);
3630 call_rcu(&ha->rcu_head, ha_rcu_free);
3631 }
3632}
3633
3634/* Device addresses handling functions */
3635
3636static void dev_addr_flush(struct net_device *dev)
3637{
3638 /* rtnl_mutex must be held here */
3639
3640 __hw_addr_flush(&dev->dev_addr_list);
3641 dev->dev_addr = NULL;
3642}
3643
3644static int dev_addr_init(struct net_device *dev)
3645{
3646 unsigned char addr[MAX_ADDR_LEN];
3647 struct netdev_hw_addr *ha;
3648 int err;
3649
3650 /* rtnl_mutex must be held here */
3651
3652 INIT_LIST_HEAD(&dev->dev_addr_list);
3653 memset(addr, 0, sizeof(addr));
Jiri Pirkoccffad252009-05-22 23:22:17 +00003654 err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003655 NETDEV_HW_ADDR_T_LAN);
3656 if (!err) {
3657 /*
3658 * Get the first (previously created) address from the list
3659 * and set dev_addr pointer to this location.
3660 */
3661 ha = list_first_entry(&dev->dev_addr_list,
3662 struct netdev_hw_addr, list);
3663 dev->dev_addr = ha->addr;
3664 }
3665 return err;
3666}
3667
3668/**
3669 * dev_addr_add - Add a device address
3670 * @dev: device
3671 * @addr: address to add
3672 * @addr_type: address type
3673 *
3674 * Add a device address to the device or increase the reference count if
3675 * it already exists.
3676 *
3677 * The caller must hold the rtnl_mutex.
3678 */
3679int dev_addr_add(struct net_device *dev, unsigned char *addr,
3680 unsigned char addr_type)
3681{
3682 int err;
3683
3684 ASSERT_RTNL();
3685
Jiri Pirkoccffad252009-05-22 23:22:17 +00003686 err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003687 addr_type);
3688 if (!err)
3689 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3690 return err;
3691}
3692EXPORT_SYMBOL(dev_addr_add);
3693
3694/**
3695 * dev_addr_del - Release a device address.
3696 * @dev: device
3697 * @addr: address to delete
3698 * @addr_type: address type
3699 *
3700 * Release reference to a device address and remove it from the device
3701 * if the reference count drops to zero.
3702 *
3703 * The caller must hold the rtnl_mutex.
3704 */
3705int dev_addr_del(struct net_device *dev, unsigned char *addr,
3706 unsigned char addr_type)
3707{
3708 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003709 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003710
3711 ASSERT_RTNL();
3712
Jiri Pirkoccffad252009-05-22 23:22:17 +00003713 /*
3714 * We cannot remove the first address from the list because
3715 * dev->dev_addr points to it.
3716 */
3717 ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list);
3718 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3719 return -ENOENT;
3720
3721 err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len,
3722 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003723 if (!err)
3724 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3725 return err;
3726}
3727EXPORT_SYMBOL(dev_addr_del);
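/*
 * Usage sketch (illustrative): a driver exposing an extra hardware
 * address (here the made-up "san_addr") pairs these calls under RTNL:
 *
 *	err = dev_addr_add(dev, san_addr, NETDEV_HW_ADDR_T_SAN);
 *	...
 *	dev_addr_del(dev, san_addr, NETDEV_HW_ADDR_T_SAN);
 */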
3728
3729/**
3730 * dev_addr_add_multiple - Add device addresses from another device
3731 * @to_dev: device to which addresses will be added
3732 * @from_dev: device from which addresses will be added
3733 * @addr_type: address type - 0 means type will be used from from_dev
3734 *
3735 * Add the device addresses of one device to another.
3736 *
3737 * The caller must hold the rtnl_mutex.
3738 */
3739int dev_addr_add_multiple(struct net_device *to_dev,
3740 struct net_device *from_dev,
3741 unsigned char addr_type)
3742{
3743 int err;
3744
3745 ASSERT_RTNL();
3746
3747 if (from_dev->addr_len != to_dev->addr_len)
3748 return -EINVAL;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003749 err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL,
3750 &from_dev->dev_addr_list,
3751 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003752 if (!err)
3753 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3754 return err;
3755}
3756EXPORT_SYMBOL(dev_addr_add_multiple);
3757
3758/**
3759 * dev_addr_del_multiple - Delete device addresses by another device
3760 * @to_dev: device where the addresses will be deleted
3761 * @from_dev: device supplying the addresses to be deleted
3762 * @addr_type: address type - 0 means type will be used from from_dev
3763 *
3764 * Deletes the addresses in @to_dev that also appear in @from_dev's list.
3765 *
3766 * The caller must hold the rtnl_mutex.
3767 */
3768int dev_addr_del_multiple(struct net_device *to_dev,
3769 struct net_device *from_dev,
3770 unsigned char addr_type)
3771{
3772 ASSERT_RTNL();
3773
3774 if (from_dev->addr_len != to_dev->addr_len)
3775 return -EINVAL;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003776 __hw_addr_del_multiple(&to_dev->dev_addr_list, NULL,
3777 &from_dev->dev_addr_list,
3778 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003779 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3780 return 0;
3781}
3782EXPORT_SYMBOL(dev_addr_del_multiple);
3783
3784/* unicast and multicast addresses handling functions */
3785
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003786int __dev_addr_delete(struct dev_addr_list **list, int *count,
3787 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003788{
3789 struct dev_addr_list *da;
3790
3791 for (; (da = *list) != NULL; list = &da->next) {
3792 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3793 alen == da->da_addrlen) {
3794 if (glbl) {
3795 int old_glbl = da->da_gusers;
3796 da->da_gusers = 0;
3797 if (old_glbl == 0)
3798 break;
3799 }
3800 if (--da->da_users)
3801 return 0;
3802
3803 *list = da->next;
3804 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003805 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003806 return 0;
3807 }
3808 }
3809 return -ENOENT;
3810}
3811
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003812int __dev_addr_add(struct dev_addr_list **list, int *count,
3813 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003814{
3815 struct dev_addr_list *da;
3816
3817 for (da = *list; da != NULL; da = da->next) {
3818 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3819 da->da_addrlen == alen) {
3820 if (glbl) {
3821 int old_glbl = da->da_gusers;
3822 da->da_gusers = 1;
3823 if (old_glbl)
3824 return 0;
3825 }
3826 da->da_users++;
3827 return 0;
3828 }
3829 }
3830
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003831 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003832 if (da == NULL)
3833 return -ENOMEM;
3834 memcpy(da->da_addr, addr, alen);
3835 da->da_addrlen = alen;
3836 da->da_users = 1;
3837 da->da_gusers = glbl ? 1 : 0;
3838 da->next = *list;
3839 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003840 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003841 return 0;
3842}
3843
Patrick McHardy4417da62007-06-27 01:28:10 -07003844/**
3845 * dev_unicast_delete - Release secondary unicast address.
3846 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003847 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07003848 *
3849 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003850 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003851 *
3852 * The caller must hold the rtnl_mutex.
3853 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003854int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003855{
3856 int err;
3857
3858 ASSERT_RTNL();
3859
Jiri Pirkoccffad252009-05-22 23:22:17 +00003860 err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr,
3861 dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003862 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003863 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003864 return err;
3865}
3866EXPORT_SYMBOL(dev_unicast_delete);
3867
3868/**
3869 * dev_unicast_add - add a secondary unicast address
3870 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003871 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07003872 *
3873 * Add a secondary unicast address to the device or increase
3874 * the reference count if it already exists.
3875 *
3876 * The caller must hold the rtnl_mutex.
3877 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003878int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003879{
3880 int err;
3881
3882 ASSERT_RTNL();
3883
Jiri Pirkoccffad252009-05-22 23:22:17 +00003884 err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr,
3885 dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003886 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003887 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003888 return err;
3889}
3890EXPORT_SYMBOL(dev_unicast_add);
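/*
 * Usage sketch (illustrative): a stacked driver such as macvlan makes
 * its own MAC address visible on the lower device in its open routine:
 *
 *	err = dev_unicast_add(lowerdev, dev->dev_addr);
 *
 * and undoes it with dev_unicast_delete(lowerdev, dev->dev_addr) when
 * the device is stopped.
 */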
3891
Chris Leeche83a2ea2008-01-31 16:53:23 -08003892int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3893 struct dev_addr_list **from, int *from_count)
3894{
3895 struct dev_addr_list *da, *next;
3896 int err = 0;
3897
3898 da = *from;
3899 while (da != NULL) {
3900 next = da->next;
3901 if (!da->da_synced) {
3902 err = __dev_addr_add(to, to_count,
3903 da->da_addr, da->da_addrlen, 0);
3904 if (err < 0)
3905 break;
3906 da->da_synced = 1;
3907 da->da_users++;
3908 } else if (da->da_users == 1) {
3909 __dev_addr_delete(to, to_count,
3910 da->da_addr, da->da_addrlen, 0);
3911 __dev_addr_delete(from, from_count,
3912 da->da_addr, da->da_addrlen, 0);
3913 }
3914 da = next;
3915 }
3916 return err;
3917}
3918
3919void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3920 struct dev_addr_list **from, int *from_count)
3921{
3922 struct dev_addr_list *da, *next;
3923
3924 da = *from;
3925 while (da != NULL) {
3926 next = da->next;
3927 if (da->da_synced) {
3928 __dev_addr_delete(to, to_count,
3929 da->da_addr, da->da_addrlen, 0);
3930 da->da_synced = 0;
3931 __dev_addr_delete(from, from_count,
3932 da->da_addr, da->da_addrlen, 0);
3933 }
3934 da = next;
3935 }
3936}
3937
3938/**
3939 * dev_unicast_sync - Synchronize device's unicast list to another device
3940 * @to: destination device
3941 * @from: source device
3942 *
3943 * Add newly added addresses to the destination device and release
Jiri Pirkoccffad252009-05-22 23:22:17 +00003944 * addresses that have no users left.
Chris Leeche83a2ea2008-01-31 16:53:23 -08003945 *
3946 * This function is intended to be called from the dev->set_rx_mode
3947 * function of layered software devices.
3948 */
3949int dev_unicast_sync(struct net_device *to, struct net_device *from)
3950{
3951 int err = 0;
3952
Jiri Pirkoccffad252009-05-22 23:22:17 +00003953 ASSERT_RTNL();
3954
3955 if (to->addr_len != from->addr_len)
3956 return -EINVAL;
3957
3958 err = __hw_addr_sync(&to->uc_list, &to->uc_count,
3959 &from->uc_list, &from->uc_count, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003960 if (!err)
3961 __dev_set_rx_mode(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003962 return err;
3963}
3964EXPORT_SYMBOL(dev_unicast_sync);
3965
3966/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003967 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003968 * @to: destination device
3969 * @from: source device
3970 *
3971 * Remove all addresses that were added to the destination device by
3972 * dev_unicast_sync(). This function is intended to be called from the
3973 * dev->stop function of layered software devices.
3974 */
3975void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3976{
Jiri Pirkoccffad252009-05-22 23:22:17 +00003977 ASSERT_RTNL();
Chris Leeche83a2ea2008-01-31 16:53:23 -08003978
Jiri Pirkoccffad252009-05-22 23:22:17 +00003979 if (to->addr_len != from->addr_len)
3980 return;
3981
3982 __hw_addr_unsync(&to->uc_list, &to->uc_count,
3983 &from->uc_list, &from->uc_count, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003984 __dev_set_rx_mode(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003985}
3986EXPORT_SYMBOL(dev_unicast_unsync);
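/*
 * Usage sketch (illustrative): a VLAN-style device propagates its
 * filter state to the real device from its ndo_set_rx_mode hook,
 *
 *	dev_unicast_sync(real_dev, vlan_dev);
 *
 * and calls dev_unicast_unsync(real_dev, vlan_dev) from its stop
 * routine so the lower device's list is cleaned up again.
 */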
3987
Jiri Pirkoccffad252009-05-22 23:22:17 +00003988static void dev_unicast_flush(struct net_device *dev)
3989{
3990 /* rtnl_mutex must be held here */
3991
3992 __hw_addr_flush(&dev->uc_list);
3993 dev->uc_count = 0;
3994}
3995
3996static void dev_unicast_init(struct net_device *dev)
3997{
3998 /* rtnl_mutex must be held here */
3999
4000 INIT_LIST_HEAD(&dev->uc_list);
4001}
4002
4003
Denis Cheng12972622007-07-18 02:12:56 -07004004static void __dev_addr_discard(struct dev_addr_list **list)
4005{
4006 struct dev_addr_list *tmp;
4007
4008 while (*list != NULL) {
4009 tmp = *list;
4010 *list = tmp->next;
4011 if (tmp->da_users > tmp->da_gusers)
4012 printk("__dev_addr_discard: address leakage! "
4013 "da_users=%d\n", tmp->da_users);
4014 kfree(tmp);
4015 }
4016}
4017
Denis Cheng26cc2522007-07-18 02:12:03 -07004018static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004019{
David S. Millerb9e40852008-07-15 00:15:08 -07004020 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004021
Denis Cheng456ad752007-07-18 02:10:54 -07004022 __dev_addr_discard(&dev->mc_list);
4023 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004024
David S. Millerb9e40852008-07-15 00:15:08 -07004025 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004026}
4027
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004028/**
4029 * dev_get_flags - get flags reported to userspace
4030 * @dev: device
4031 *
4032 * Get the combination of flag bits exported through APIs to userspace.
4033 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004034unsigned dev_get_flags(const struct net_device *dev)
4035{
4036 unsigned flags;
4037
4038 flags = (dev->flags & ~(IFF_PROMISC |
4039 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004040 IFF_RUNNING |
4041 IFF_LOWER_UP |
4042 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043 (dev->gflags & (IFF_PROMISC |
4044 IFF_ALLMULTI));
4045
Stefan Rompfb00055a2006-03-20 17:09:11 -08004046 if (netif_running(dev)) {
4047 if (netif_oper_up(dev))
4048 flags |= IFF_RUNNING;
4049 if (netif_carrier_ok(dev))
4050 flags |= IFF_LOWER_UP;
4051 if (netif_dormant(dev))
4052 flags |= IFF_DORMANT;
4053 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054
4055 return flags;
4056}
4057
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004058/**
4059 * dev_change_flags - change device settings
4060 * @dev: device
4061 * @flags: device state flags
4062 *
4063 * Change settings on device based state flags. The flags are
4064 * in the userspace exported format.
4065 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066int dev_change_flags(struct net_device *dev, unsigned flags)
4067{
Thomas Graf7c355f52007-06-05 16:03:03 -07004068 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069 int old_flags = dev->flags;
4070
Patrick McHardy24023452007-07-14 18:51:31 -07004071 ASSERT_RTNL();
4072
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073 /*
4074 * Set the flags on our device.
4075 */
4076
4077 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4078 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4079 IFF_AUTOMEDIA)) |
4080 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4081 IFF_ALLMULTI));
4082
4083 /*
4084 * Load in the correct multicast list now the flags have changed.
4085 */
4086
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004087 if ((old_flags ^ flags) & IFF_MULTICAST)
4088 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004089
Patrick McHardy4417da62007-06-27 01:28:10 -07004090 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004091
4092 /*
4093 * Have we downed the interface? We handle IFF_UP ourselves
4094 * according to user attempts to set it, rather than blindly
4095 * setting it.
4096 */
4097
4098 ret = 0;
4099 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4100 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4101
4102 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004103 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104 }
4105
4106 if (dev->flags & IFF_UP &&
4107 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4108 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004109 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110
4111 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4112 int inc = (flags & IFF_PROMISC) ? +1 : -1;
4113 dev->gflags ^= IFF_PROMISC;
4114 dev_set_promiscuity(dev, inc);
4115 }
4116
4117 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4118 is important. Some (broken) drivers set IFF_PROMISC when
4119 IFF_ALLMULTI is requested, without asking us and without reporting it.
4120 */
4121 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4122 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
4123 dev->gflags ^= IFF_ALLMULTI;
4124 dev_set_allmulti(dev, inc);
4125 }
4126
Thomas Graf7c355f52007-06-05 16:03:03 -07004127 /* Exclude state transition flags, already notified */
4128 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4129 if (changes)
4130 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131
4132 return ret;
4133}
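/*
 * Usage sketch (illustrative): bringing an interface up from inside
 * the kernel is simply
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);
 *	rtnl_unlock();
 *
 * which is what the SIOCSIFFLAGS handler below does on behalf of
 * userspace tools such as ifconfig.
 */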
4134
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004135/**
4136 * dev_set_mtu - Change maximum transfer unit
4137 * @dev: device
4138 * @new_mtu: new transfer unit
4139 *
4140 * Change the maximum transfer size of the network device.
4141 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142int dev_set_mtu(struct net_device *dev, int new_mtu)
4143{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004144 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145 int err;
4146
4147 if (new_mtu == dev->mtu)
4148 return 0;
4149
4150 /* MTU must be positive. */
4151 if (new_mtu < 0)
4152 return -EINVAL;
4153
4154 if (!netif_device_present(dev))
4155 return -ENODEV;
4156
4157 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004158 if (ops->ndo_change_mtu)
4159 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004160 else
4161 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004162
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004164 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165 return err;
4166}
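/*
 * Usage sketch (illustrative): a tunnel device sizing itself to fit
 * inside its lower device would do, under RTNL,
 *
 *	err = dev_set_mtu(tunnel_dev, lower_dev->mtu - overhead);
 *
 * where "overhead" stands for the encapsulation header length; all
 * names here are hypothetical.
 */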
4167
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004168/**
4169 * dev_set_mac_address - Change Media Access Control Address
4170 * @dev: device
4171 * @sa: new address
4172 *
4173 * Change the hardware (MAC) address of the device
4174 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004175int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4176{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004177 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178 int err;
4179
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004180 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181 return -EOPNOTSUPP;
4182 if (sa->sa_family != dev->type)
4183 return -EINVAL;
4184 if (!netif_device_present(dev))
4185 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004186 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004188 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004189 return err;
4190}
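/*
 * Usage sketch (illustrative): kernel callers build the same sockaddr
 * the SIOCSIFHWADDR path below receives from userspace:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;		e.g. ARPHRD_ETHER
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);	under rtnl_lock()
 *
 * "new_mac" is a hypothetical buffer of dev->addr_len bytes.
 */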
4191
4192/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07004193 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004195static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196{
4197 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004198 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004199
4200 if (!dev)
4201 return -ENODEV;
4202
4203 switch (cmd) {
4204 case SIOCGIFFLAGS: /* Get interface flags */
4205 ifr->ifr_flags = dev_get_flags(dev);
4206 return 0;
4207
Linus Torvalds1da177e2005-04-16 15:20:36 -07004208 case SIOCGIFMETRIC: /* Get the metric on the interface
4209 (currently unused) */
4210 ifr->ifr_metric = 0;
4211 return 0;
4212
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213 case SIOCGIFMTU: /* Get the MTU of a device */
4214 ifr->ifr_mtu = dev->mtu;
4215 return 0;
4216
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217 case SIOCGIFHWADDR:
4218 if (!dev->addr_len)
4219 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4220 else
4221 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4222 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4223 ifr->ifr_hwaddr.sa_family = dev->type;
4224 return 0;
4225
Jeff Garzik14e3e072007-10-08 00:06:32 -07004226 case SIOCGIFSLAVE:
4227 err = -EINVAL;
4228 break;
4229
4230 case SIOCGIFMAP:
4231 ifr->ifr_map.mem_start = dev->mem_start;
4232 ifr->ifr_map.mem_end = dev->mem_end;
4233 ifr->ifr_map.base_addr = dev->base_addr;
4234 ifr->ifr_map.irq = dev->irq;
4235 ifr->ifr_map.dma = dev->dma;
4236 ifr->ifr_map.port = dev->if_port;
4237 return 0;
4238
4239 case SIOCGIFINDEX:
4240 ifr->ifr_ifindex = dev->ifindex;
4241 return 0;
4242
4243 case SIOCGIFTXQLEN:
4244 ifr->ifr_qlen = dev->tx_queue_len;
4245 return 0;
4246
4247 default:
4248 /* dev_ioctl() should ensure this case
4249 * is never reached
4250 */
4251 WARN_ON(1);
4252 err = -EINVAL;
4253 break;
4254
4255 }
4256 return err;
4257}
4258
4259/*
4260 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4261 */
4262static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4263{
4264 int err;
4265 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004266 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004267
4268 if (!dev)
4269 return -ENODEV;
4270
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004271 ops = dev->netdev_ops;
4272
Jeff Garzik14e3e072007-10-08 00:06:32 -07004273 switch (cmd) {
4274 case SIOCSIFFLAGS: /* Set interface flags */
4275 return dev_change_flags(dev, ifr->ifr_flags);
4276
4277 case SIOCSIFMETRIC: /* Set the metric on the interface
4278 (currently unused) */
4279 return -EOPNOTSUPP;
4280
4281 case SIOCSIFMTU: /* Set the MTU of a device */
4282 return dev_set_mtu(dev, ifr->ifr_mtu);
4283
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284 case SIOCSIFHWADDR:
4285 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4286
4287 case SIOCSIFHWBROADCAST:
4288 if (ifr->ifr_hwaddr.sa_family != dev->type)
4289 return -EINVAL;
4290 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4291 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004292 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293 return 0;
4294
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295 case SIOCSIFMAP:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004296 if (ops->ndo_set_config) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297 if (!netif_device_present(dev))
4298 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004299 return ops->ndo_set_config(dev, &ifr->ifr_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300 }
4301 return -EOPNOTSUPP;
4302
4303 case SIOCADDMULTI:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004304 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4306 return -EINVAL;
4307 if (!netif_device_present(dev))
4308 return -ENODEV;
4309 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4310 dev->addr_len, 1);
4311
4312 case SIOCDELMULTI:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004313 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4315 return -EINVAL;
4316 if (!netif_device_present(dev))
4317 return -ENODEV;
4318 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4319 dev->addr_len, 1);
4320
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321 case SIOCSIFTXQLEN:
4322 if (ifr->ifr_qlen < 0)
4323 return -EINVAL;
4324 dev->tx_queue_len = ifr->ifr_qlen;
4325 return 0;
4326
4327 case SIOCSIFNAME:
4328 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4329 return dev_change_name(dev, ifr->ifr_newname);
4330
4331 /*
4332 * Unknown or private ioctl
4333 */
4334
4335 default:
4336 if ((cmd >= SIOCDEVPRIVATE &&
4337 cmd <= SIOCDEVPRIVATE + 15) ||
4338 cmd == SIOCBONDENSLAVE ||
4339 cmd == SIOCBONDRELEASE ||
4340 cmd == SIOCBONDSETHWADDR ||
4341 cmd == SIOCBONDSLAVEINFOQUERY ||
4342 cmd == SIOCBONDINFOQUERY ||
4343 cmd == SIOCBONDCHANGEACTIVE ||
4344 cmd == SIOCGMIIPHY ||
4345 cmd == SIOCGMIIREG ||
4346 cmd == SIOCSMIIREG ||
4347 cmd == SIOCBRADDIF ||
4348 cmd == SIOCBRDELIF ||
Patrick Ohlyd24fff22009-02-12 05:03:40 +00004349 cmd == SIOCSHWTSTAMP ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350 cmd == SIOCWANDEV) {
4351 err = -EOPNOTSUPP;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004352 if (ops->ndo_do_ioctl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353 if (netif_device_present(dev))
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004354 err = ops->ndo_do_ioctl(dev, ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355 else
4356 err = -ENODEV;
4357 }
4358 } else
4359 err = -EINVAL;
4360
4361 }
4362 return err;
4363}
4364
4365/*
4366 * This function handles all "interface"-type I/O control requests. The actual
4367 * 'doing' part of this is dev_ifsioc above.
4368 */
4369
4370/**
4371 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004372 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373 * @cmd: command to issue
4374 * @arg: pointer to a struct ifreq in user space
4375 *
4376 * Issue ioctl functions to devices. This is normally called by the
4377 * user space syscall interfaces but can sometimes be useful for
4378 * other purposes. The return value is the return from the syscall if
4379 * positive or a negative errno code on error.
4380 */
4381
Eric W. Biederman881d9662007-09-17 11:56:21 -07004382int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004383{
4384 struct ifreq ifr;
4385 int ret;
4386 char *colon;
4387
 4388	/* One special case: SIOCGIFCONF takes an ifconf argument
 4389	   and requires a shared lock, because it sleeps writing
4390 to user space.
4391 */
4392
4393 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004394 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004395 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004396 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004397 return ret;
4398 }
4399 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004400 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004401
4402 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4403 return -EFAULT;
4404
4405 ifr.ifr_name[IFNAMSIZ-1] = 0;
4406
4407 colon = strchr(ifr.ifr_name, ':');
4408 if (colon)
4409 *colon = 0;
4410
4411 /*
4412 * See which interface the caller is talking about.
4413 */
4414
4415 switch (cmd) {
4416 /*
4417 * These ioctl calls:
4418 * - can be done by all.
4419 * - atomic and do not require locking.
4420 * - return a value
4421 */
4422 case SIOCGIFFLAGS:
4423 case SIOCGIFMETRIC:
4424 case SIOCGIFMTU:
4425 case SIOCGIFHWADDR:
4426 case SIOCGIFSLAVE:
4427 case SIOCGIFMAP:
4428 case SIOCGIFINDEX:
4429 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004430 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431 read_lock(&dev_base_lock);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004432 ret = dev_ifsioc_locked(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004433 read_unlock(&dev_base_lock);
4434 if (!ret) {
4435 if (colon)
4436 *colon = ':';
4437 if (copy_to_user(arg, &ifr,
4438 sizeof(struct ifreq)))
4439 ret = -EFAULT;
4440 }
4441 return ret;
4442
4443 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004444 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004446 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004447 rtnl_unlock();
4448 if (!ret) {
4449 if (colon)
4450 *colon = ':';
4451 if (copy_to_user(arg, &ifr,
4452 sizeof(struct ifreq)))
4453 ret = -EFAULT;
4454 }
4455 return ret;
4456
4457 /*
4458 * These ioctl calls:
4459 * - require superuser power.
4460 * - require strict serialization.
4461 * - return a value
4462 */
4463 case SIOCGMIIPHY:
4464 case SIOCGMIIREG:
4465 case SIOCSIFNAME:
4466 if (!capable(CAP_NET_ADMIN))
4467 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004468 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004470 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004471 rtnl_unlock();
4472 if (!ret) {
4473 if (colon)
4474 *colon = ':';
4475 if (copy_to_user(arg, &ifr,
4476 sizeof(struct ifreq)))
4477 ret = -EFAULT;
4478 }
4479 return ret;
4480
4481 /*
4482 * These ioctl calls:
4483 * - require superuser power.
4484 * - require strict serialization.
4485 * - do not return a value
4486 */
4487 case SIOCSIFFLAGS:
4488 case SIOCSIFMETRIC:
4489 case SIOCSIFMTU:
4490 case SIOCSIFMAP:
4491 case SIOCSIFHWADDR:
4492 case SIOCSIFSLAVE:
4493 case SIOCADDMULTI:
4494 case SIOCDELMULTI:
4495 case SIOCSIFHWBROADCAST:
4496 case SIOCSIFTXQLEN:
4497 case SIOCSMIIREG:
4498 case SIOCBONDENSLAVE:
4499 case SIOCBONDRELEASE:
4500 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501 case SIOCBONDCHANGEACTIVE:
4502 case SIOCBRADDIF:
4503 case SIOCBRDELIF:
Patrick Ohlyd24fff22009-02-12 05:03:40 +00004504 case SIOCSHWTSTAMP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505 if (!capable(CAP_NET_ADMIN))
4506 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08004507 /* fall through */
4508 case SIOCBONDSLAVEINFOQUERY:
4509 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004510 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004511 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004512 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513 rtnl_unlock();
4514 return ret;
4515
4516 case SIOCGIFMEM:
4517 /* Get the per device memory space. We can add this but
4518 * currently do not support it */
4519 case SIOCSIFMEM:
4520 /* Set the per device memory buffer space.
4521 * Not applicable in our case */
4522 case SIOCSIFLINK:
4523 return -EINVAL;
4524
4525 /*
4526 * Unknown or private ioctl.
4527 */
4528 default:
4529 if (cmd == SIOCWANDEV ||
4530 (cmd >= SIOCDEVPRIVATE &&
4531 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004532 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004533 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004534 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004535 rtnl_unlock();
4536 if (!ret && copy_to_user(arg, &ifr,
4537 sizeof(struct ifreq)))
4538 ret = -EFAULT;
4539 return ret;
4540 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004541 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07004542 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004543 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544 return -EINVAL;
4545 }
4546}
4547
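/*
 * Illustrative sketch, not part of the original file: how the read-only
 * SIOCGIFxxx path above is exercised from user space.  The socket is
 * only an ioctl handle; "eth0" is an assumed interface name.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)	/* served by dev_ifsioc_locked() */
		printf("mtu %d\n", ifr.ifr_mtu);
	if (ioctl(fd, SIOCGIFINDEX, &ifr) == 0)
		printf("ifindex %d\n", ifr.ifr_ifindex);

	close(fd);
	return 0;
}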
4548
4549/**
4550 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004551 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552 *
4553 * Returns a suitable unique value for a new device interface
4554 * number. The caller must hold the rtnl semaphore or the
4555 * dev_base_lock to be sure it remains unique.
4556 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004557static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004558{
4559 static int ifindex;
4560 for (;;) {
4561 if (++ifindex <= 0)
4562 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004563 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564 return ifindex;
4565 }
4566}
4567
Linus Torvalds1da177e2005-04-16 15:20:36 -07004568/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004569static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004570
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004571static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004572{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004573 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004574}
4575
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004576static void rollback_registered(struct net_device *dev)
4577{
4578 BUG_ON(dev_boot_phase);
4579 ASSERT_RTNL();
4580
 4581	/* Some devices call this without ever registering, to unwind a failed initialization. */
4582 if (dev->reg_state == NETREG_UNINITIALIZED) {
4583 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4584 "was registered\n", dev->name, dev);
4585
4586 WARN_ON(1);
4587 return;
4588 }
4589
4590 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4591
4592 /* If device is running, close it first. */
4593 dev_close(dev);
4594
4595 /* And unlink it from device chain. */
4596 unlist_netdevice(dev);
4597
4598 dev->reg_state = NETREG_UNREGISTERING;
4599
4600 synchronize_net();
4601
4602 /* Shutdown queueing discipline. */
4603 dev_shutdown(dev);
4604
4605
 4606	/* Notify protocols that we are about to destroy
 4607	   this device. They should clean up all of their state.
4608 */
4609 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4610
4611 /*
4612 * Flush the unicast and multicast chains
4613 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00004614 dev_unicast_flush(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004615 dev_addr_discard(dev);
4616
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004617 if (dev->netdev_ops->ndo_uninit)
4618 dev->netdev_ops->ndo_uninit(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004619
4620 /* Notifier chain MUST detach us from master device. */
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004621 WARN_ON(dev->master);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004622
4623 /* Remove entries from kobject tree */
4624 netdev_unregister_kobject(dev);
4625
4626 synchronize_net();
4627
4628 dev_put(dev);
4629}
4630
David S. Millere8a04642008-07-17 00:34:19 -07004631static void __netdev_init_queue_locks_one(struct net_device *dev,
4632 struct netdev_queue *dev_queue,
4633 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004634{
4635 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004636 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004637 dev_queue->xmit_lock_owner = -1;
4638}
4639
4640static void netdev_init_queue_locks(struct net_device *dev)
4641{
David S. Millere8a04642008-07-17 00:34:19 -07004642 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4643 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004644}
4645
Herbert Xub63365a2008-10-23 01:11:29 -07004646unsigned long netdev_fix_features(unsigned long features, const char *name)
4647{
4648 /* Fix illegal SG+CSUM combinations. */
4649 if ((features & NETIF_F_SG) &&
4650 !(features & NETIF_F_ALL_CSUM)) {
4651 if (name)
4652 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4653 "checksum feature.\n", name);
4654 features &= ~NETIF_F_SG;
4655 }
4656
4657 /* TSO requires that SG is present as well. */
4658 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4659 if (name)
4660 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4661 "SG feature.\n", name);
4662 features &= ~NETIF_F_TSO;
4663 }
4664
4665 if (features & NETIF_F_UFO) {
4666 if (!(features & NETIF_F_GEN_CSUM)) {
4667 if (name)
4668 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4669 "since no NETIF_F_HW_CSUM feature.\n",
4670 name);
4671 features &= ~NETIF_F_UFO;
4672 }
4673
4674 if (!(features & NETIF_F_SG)) {
4675 if (name)
4676 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4677 "since no NETIF_F_SG feature.\n", name);
4678 features &= ~NETIF_F_UFO;
4679 }
4680 }
4681
4682 return features;
4683}
4684EXPORT_SYMBOL(netdev_fix_features);
4685
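/*
 * Illustrative sketch, not part of the original file: a driver probe may
 * sanitize its advertised features with netdev_fix_features() before
 * registration.  "example_dev" is an assumed, already-allocated device.
 */
#include <linux/netdevice.h>

static void example_sanitize_features(struct net_device *example_dev)
{
	/* Request SG+TSO without any checksum feature; the helper drops
	 * both (logging why), leaving a self-consistent set. */
	example_dev->features |= NETIF_F_SG | NETIF_F_TSO;
	example_dev->features = netdev_fix_features(example_dev->features,
						    example_dev->name);
}
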
Linus Torvalds1da177e2005-04-16 15:20:36 -07004686/**
4687 * register_netdevice - register a network device
4688 * @dev: device to register
4689 *
4690 * Take a completed network device structure and add it to the kernel
4691 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4692 * chain. 0 is returned on success. A negative errno code is returned
4693 * on a failure to set up the device, or if the name is a duplicate.
4694 *
4695 * Callers must hold the rtnl semaphore. You may want
4696 * register_netdev() instead of this.
4697 *
4698 * BUGS:
4699 * The locking appears insufficient to guarantee two parallel registers
4700 * will not get the same name.
4701 */
4702
4703int register_netdevice(struct net_device *dev)
4704{
4705 struct hlist_head *head;
4706 struct hlist_node *p;
4707 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004708 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004709
4710 BUG_ON(dev_boot_phase);
4711 ASSERT_RTNL();
4712
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004713 might_sleep();
4714
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715 /* When net_device's are persistent, this will be fatal. */
4716 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004717 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004718
David S. Millerf1f28aa2008-07-15 00:08:33 -07004719 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004720 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004721 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723 dev->iflink = -1;
4724
4725 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004726 if (dev->netdev_ops->ndo_init) {
4727 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004728 if (ret) {
4729 if (ret > 0)
4730 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004731 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004732 }
4733 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004734
Linus Torvalds1da177e2005-04-16 15:20:36 -07004735 if (!dev_valid_name(dev->name)) {
4736 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004737 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738 }
4739
Eric W. Biederman881d9662007-09-17 11:56:21 -07004740 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741 if (dev->iflink == -1)
4742 dev->iflink = dev->ifindex;
4743
4744 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004745 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004746 hlist_for_each(p, head) {
4747 struct net_device *d
4748 = hlist_entry(p, struct net_device, name_hlist);
4749 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4750 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004751 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004752 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004753 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004755 /* Fix illegal checksum combinations */
4756 if ((dev->features & NETIF_F_HW_CSUM) &&
4757 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4758 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4759 dev->name);
4760 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4761 }
4762
4763 if ((dev->features & NETIF_F_NO_CSUM) &&
4764 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4765 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4766 dev->name);
4767 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4768 }
4769
Herbert Xub63365a2008-10-23 01:11:29 -07004770 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004771
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004772 /* Enable software GSO if SG is supported. */
4773 if (dev->features & NETIF_F_SG)
4774 dev->features |= NETIF_F_GSO;
4775
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004776 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004777 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004778 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004779 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004780 dev->reg_state = NETREG_REGISTERED;
4781
Linus Torvalds1da177e2005-04-16 15:20:36 -07004782 /*
4783 * Default initial state at registry is that the
4784 * device is present.
4785 */
4786
4787 set_bit(__LINK_STATE_PRESENT, &dev->state);
4788
Linus Torvalds1da177e2005-04-16 15:20:36 -07004789 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004790 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004791 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004792
4793 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004794 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004795 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004796 if (ret) {
4797 rollback_registered(dev);
4798 dev->reg_state = NETREG_UNREGISTERED;
4799 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004800
4801out:
4802 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004803
4804err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004805 if (dev->netdev_ops->ndo_uninit)
4806 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004807 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004808}
4809
4810/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004811 * init_dummy_netdev - init a dummy network device for NAPI
4812 * @dev: device to init
4813 *
 4814 * This takes a network device structure and initializes the minimum
 4815 * number of fields so it can be used to schedule NAPI polls without
 4816 * registering a full-blown interface. This is to be used by drivers
4817 * that need to tie several hardware interfaces to a single NAPI
4818 * poll scheduler due to HW limitations.
4819 */
4820int init_dummy_netdev(struct net_device *dev)
4821{
4822 /* Clear everything. Note we don't initialize spinlocks
 4823	 * as they aren't supposed to be taken by any of the
4824 * NAPI code and this dummy netdev is supposed to be
4825 * only ever used for NAPI polls
4826 */
4827 memset(dev, 0, sizeof(struct net_device));
4828
4829 /* make sure we BUG if trying to hit standard
4830 * register/unregister code path
4831 */
4832 dev->reg_state = NETREG_DUMMY;
4833
4834 /* initialize the ref count */
4835 atomic_set(&dev->refcnt, 1);
4836
4837 /* NAPI wants this */
4838 INIT_LIST_HEAD(&dev->napi_list);
4839
4840 /* a dummy interface is started by default */
4841 set_bit(__LINK_STATE_PRESENT, &dev->state);
4842 set_bit(__LINK_STATE_START, &dev->state);
4843
4844 return 0;
4845}
4846EXPORT_SYMBOL_GPL(init_dummy_netdev);
4847
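/*
 * Illustrative sketch, not part of the original file: tying the NAPI
 * contexts of several hardware channels to one dummy netdev, which is
 * the use case init_dummy_netdev() serves.  "struct my_chan", my_poll()
 * and the weight of 64 are assumptions for the example.
 */
#include <linux/netdevice.h>

struct my_chan {
	struct napi_struct napi;
	/* ... per-channel hardware state ... */
};

static struct net_device example_dummy_dev;

static int my_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... process up to budget packets for this channel ... */
	if (done < budget)
		napi_complete(napi);
	return done;
}

static void example_napi_init(struct my_chan *chans, int n)
{
	int i;

	init_dummy_netdev(&example_dummy_dev);
	for (i = 0; i < n; i++) {
		netif_napi_add(&example_dummy_dev, &chans[i].napi,
			       my_poll, 64);
		napi_enable(&chans[i].napi);
	}
	/* channel interrupts then call napi_schedule(&chan->napi) */
}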
4848
4849/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004850 * register_netdev - register a network device
4851 * @dev: device to register
4852 *
4853 * Take a completed network device structure and add it to the kernel
4854 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4855 * chain. 0 is returned on success. A negative errno code is returned
4856 * on a failure to set up the device, or if the name is a duplicate.
4857 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004858 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859 * and expands the device name if you passed a format string to
4860 * alloc_netdev.
4861 */
4862int register_netdev(struct net_device *dev)
4863{
4864 int err;
4865
4866 rtnl_lock();
4867
4868 /*
4869 * If the name is a format string the caller wants us to do a
4870 * name allocation.
4871 */
4872 if (strchr(dev->name, '%')) {
4873 err = dev_alloc_name(dev, dev->name);
4874 if (err < 0)
4875 goto out;
4876 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004877
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878 err = register_netdevice(dev);
4879out:
4880 rtnl_unlock();
4881 return err;
4882}
4883EXPORT_SYMBOL(register_netdev);
4884
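/*
 * Illustrative sketch, not part of the original file: the minimal
 * register/unregister lifecycle as a module.  register_netdev() takes
 * the rtnl semaphore and expands the "example%d" format as described
 * above; error handling beyond the register failure is elided.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static struct net_device *example_dev;

static int __init example_init(void)
{
	int err;

	example_dev = alloc_netdev(0, "example%d", ether_setup);
	if (!example_dev)
		return -ENOMEM;

	err = register_netdev(example_dev);
	if (err)
		free_netdev(example_dev);
	return err;
}

static void __exit example_exit(void)
{
	unregister_netdev(example_dev);
	free_netdev(example_dev);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
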
4885/*
4886 * netdev_wait_allrefs - wait until all references are gone.
4887 *
4888 * This is called when unregistering network devices.
4889 *
4890 * Any protocol or device that holds a reference should register
 4891 * for netdevice notification, and clean up and put back the
4892 * reference if they receive an UNREGISTER event.
4893 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004894 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895 */
4896static void netdev_wait_allrefs(struct net_device *dev)
4897{
4898 unsigned long rebroadcast_time, warning_time;
4899
4900 rebroadcast_time = warning_time = jiffies;
4901 while (atomic_read(&dev->refcnt) != 0) {
4902 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004903 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004904
4905 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004906 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004907
4908 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4909 &dev->state)) {
4910 /* We must not have linkwatch events
4911 * pending on unregister. If this
4912 * happens, we simply run the queue
4913 * unscheduled, resulting in a noop
4914 * for this device.
4915 */
4916 linkwatch_run_queue();
4917 }
4918
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004919 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004920
4921 rebroadcast_time = jiffies;
4922 }
4923
4924 msleep(250);
4925
4926 if (time_after(jiffies, warning_time + 10 * HZ)) {
4927 printk(KERN_EMERG "unregister_netdevice: "
4928 "waiting for %s to become free. Usage "
4929 "count = %d\n",
4930 dev->name, atomic_read(&dev->refcnt));
4931 warning_time = jiffies;
4932 }
4933 }
4934}
4935
4936/* The sequence is:
4937 *
4938 * rtnl_lock();
4939 * ...
4940 * register_netdevice(x1);
4941 * register_netdevice(x2);
4942 * ...
4943 * unregister_netdevice(y1);
4944 * unregister_netdevice(y2);
4945 * ...
4946 * rtnl_unlock();
4947 * free_netdev(y1);
4948 * free_netdev(y2);
4949 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07004950 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07004951 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004952 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953 * without deadlocking with linkwatch via keventd.
4954 * 2) Since we run with the RTNL semaphore not held, we can sleep
4955 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07004956 *
4957 * We must not return until all unregister events added during
4958 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004960void netdev_run_todo(void)
4961{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004962 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004963
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004965 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07004966
4967 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004968
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969 while (!list_empty(&list)) {
4970 struct net_device *dev
4971 = list_entry(list.next, struct net_device, todo_list);
4972 list_del(&dev->todo_list);
4973
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004974 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975 printk(KERN_ERR "network todo '%s' but state %d\n",
4976 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004977 dump_stack();
4978 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004979 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004980
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004981 dev->reg_state = NETREG_UNREGISTERED;
4982
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004983 on_each_cpu(flush_backlog, dev, 1);
4984
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004985 netdev_wait_allrefs(dev);
4986
4987 /* paranoia */
4988 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004989 WARN_ON(dev->ip_ptr);
4990 WARN_ON(dev->ip6_ptr);
4991 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004992
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004993 if (dev->destructor)
4994 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07004995
4996 /* Free network device */
4997 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004998 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999}
5000
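/*
 * Illustrative sketch, not part of the original file: the sequence
 * documented above as a caller would write it.  rtnl_unlock() is what
 * ends up running netdev_run_todo() for the queued unregister, so
 * free_netdev() is safe immediately afterwards.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void example_replace(struct net_device *old_dev,
			    struct net_device *new_dev)
{
	rtnl_lock();
	unregister_netdevice(old_dev);	/* queued on net_todo_list */
	register_netdevice(new_dev);	/* error handling elided */
	rtnl_unlock();			/* runs the todo list, waits for refs */
	free_netdev(old_dev);
}
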
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005001/**
5002 * dev_get_stats - get network device statistics
5003 * @dev: device to get statistics from
5004 *
5005 * Get network statistics from device. The device driver may provide
 5006 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5007 * the internal statistics structure is used.
5008 */
5009const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005010{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005011 const struct net_device_ops *ops = dev->netdev_ops;
5012
5013 if (ops->ndo_get_stats)
5014 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005015 else {
5016 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5017 struct net_device_stats *stats = &dev->stats;
5018 unsigned int i;
5019 struct netdev_queue *txq;
5020
5021 for (i = 0; i < dev->num_tx_queues; i++) {
5022 txq = netdev_get_tx_queue(dev, i);
5023 tx_bytes += txq->tx_bytes;
5024 tx_packets += txq->tx_packets;
5025 tx_dropped += txq->tx_dropped;
5026 }
5027 if (tx_bytes || tx_packets || tx_dropped) {
5028 stats->tx_bytes = tx_bytes;
5029 stats->tx_packets = tx_packets;
5030 stats->tx_dropped = tx_dropped;
5031 }
5032 return stats;
5033 }
Rusty Russellc45d2862007-03-28 14:29:08 -07005034}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005035EXPORT_SYMBOL(dev_get_stats);
Rusty Russellc45d2862007-03-28 14:29:08 -07005036
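/*
 * Illustrative sketch, not part of the original file: reading the
 * aggregate counters through dev_get_stats().  The caller only needs to
 * keep the device referenced while it inspects the snapshot.
 */
#include <linux/netdevice.h>

static void example_print_stats(struct net_device *dev)
{
	const struct net_device_stats *stats = dev_get_stats(dev);

	printk(KERN_INFO "%s: rx %lu pkts, tx %lu pkts (%lu bytes)\n",
	       dev->name, stats->rx_packets, stats->tx_packets,
	       stats->tx_bytes);
}
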
David S. Millerdc2b4842008-07-08 17:18:23 -07005037static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005038 struct netdev_queue *queue,
5039 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005040{
David S. Millerdc2b4842008-07-08 17:18:23 -07005041 queue->dev = dev;
5042}
5043
David S. Millerbb949fb2008-07-08 16:55:56 -07005044static void netdev_init_queues(struct net_device *dev)
5045{
David S. Millere8a04642008-07-17 00:34:19 -07005046 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5047 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005048 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005049}
5050
Linus Torvalds1da177e2005-04-16 15:20:36 -07005051/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005052 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005053 * @sizeof_priv: size of private data to allocate space for
5054 * @name: device name format string
5055 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005056 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057 *
5058 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005059 * and performs basic initialization. Also allocates subqueue structs
5060 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005061 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005062struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5063 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005064{
David S. Millere8a04642008-07-17 00:34:19 -07005065 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005067 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005068 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005070 BUG_ON(strlen(name) >= sizeof(dev->name));
5071
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005072 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005073 if (sizeof_priv) {
5074 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005075 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005076 alloc_size += sizeof_priv;
5077 }
5078 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005079 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005081 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005082 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005083 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005084 return NULL;
5085 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005086
Stephen Hemminger79439862008-07-21 13:28:44 -07005087 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005088 if (!tx) {
5089 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5090 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005091 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005092 }
5093
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005094 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005096
5097 if (dev_addr_init(dev))
5098 goto free_tx;
5099
Jiri Pirkoccffad252009-05-22 23:22:17 +00005100 dev_unicast_init(dev);
5101
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005102 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005103
David S. Millere8a04642008-07-17 00:34:19 -07005104 dev->_tx = tx;
5105 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005106 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005107
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005108 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005109
David S. Millerbb949fb2008-07-08 16:55:56 -07005110 netdev_init_queues(dev);
5111
Herbert Xud565b0a2008-12-15 23:38:52 -08005112 INIT_LIST_HEAD(&dev->napi_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005113 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114 setup(dev);
5115 strcpy(dev->name, name);
5116 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005117
5118free_tx:
5119 kfree(tx);
5120
5121free_p:
5122 kfree(p);
5123 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005125EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005126
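/*
 * Illustrative sketch, not part of the original file: allocating a
 * multiqueue device with driver-private state.  "struct my_priv" is an
 * assumed driver structure; netdev_priv() returns the aligned private
 * area reserved by the allocation above.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct my_priv {
	int example_field;
};

static struct net_device *example_alloc(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct my_priv), "mq%d",
			      ether_setup, 8);	/* 8 TX queues */
	if (dev) {
		struct my_priv *priv = netdev_priv(dev);

		priv->example_field = 0;
	}
	return dev;	/* register_netdev() expands "mq%d" later */
}
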
5127/**
5128 * free_netdev - free network device
5129 * @dev: device
5130 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005131 * This function does the last stage of destroying an allocated device
5132 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133 * If this is the last reference then it will be freed.
5134 */
5135void free_netdev(struct net_device *dev)
5136{
Herbert Xud565b0a2008-12-15 23:38:52 -08005137 struct napi_struct *p, *n;
5138
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005139 release_net(dev_net(dev));
5140
David S. Millere8a04642008-07-17 00:34:19 -07005141 kfree(dev->_tx);
5142
Jiri Pirkof001fde2009-05-05 02:48:28 +00005143 /* Flush device addresses */
5144 dev_addr_flush(dev);
5145
Herbert Xud565b0a2008-12-15 23:38:52 -08005146 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5147 netif_napi_del(p);
5148
Stephen Hemminger3041a062006-05-26 13:25:24 -07005149 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150 if (dev->reg_state == NETREG_UNINITIALIZED) {
5151 kfree((char *)dev - dev->padded);
5152 return;
5153 }
5154
5155 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5156 dev->reg_state = NETREG_RELEASED;
5157
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005158 /* will free via device release */
5159 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005160}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005161
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005162/**
5163 * synchronize_net - Synchronize with packet receive processing
5164 *
5165 * Wait for packets currently being received to be done.
5166 * Does not block later packets from starting.
5167 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005168void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005169{
5170 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005171 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172}
5173
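/*
 * Illustrative sketch, not part of the original file: the usual pattern
 * around synchronize_net(): unpublish a pointer the receive path may
 * dereference under rcu_read_lock(), wait out in-flight packets, then
 * free.  "example_state"/"example_ptr" are assumptions for the example.
 */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_state {
	int value;
};

static struct example_state *example_ptr;

static void example_teardown(void)
{
	struct example_state *old = example_ptr;

	rcu_assign_pointer(example_ptr, NULL);	/* stop new readers */
	synchronize_net();			/* wait for packet paths */
	kfree(old);
}
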
5174/**
5175 * unregister_netdevice - remove device from the kernel
5176 * @dev: device
5177 *
5178 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005179 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180 *
5181 * Callers must hold the rtnl semaphore. You may want
5182 * unregister_netdev() instead of this.
5183 */
5184
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08005185void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186{
Herbert Xua6620712007-12-12 19:21:56 -08005187 ASSERT_RTNL();
5188
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005189 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190 /* Finish processing unregister after unlock */
5191 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005192}
5193
5194/**
5195 * unregister_netdev - remove device from the kernel
5196 * @dev: device
5197 *
5198 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005199 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005200 *
5201 * This is just a wrapper for unregister_netdevice that takes
5202 * the rtnl semaphore. In general you want to use this and not
5203 * unregister_netdevice.
5204 */
5205void unregister_netdev(struct net_device *dev)
5206{
5207 rtnl_lock();
5208 unregister_netdevice(dev);
5209 rtnl_unlock();
5210}
5211
5212EXPORT_SYMBOL(unregister_netdev);
5213
Eric W. Biedermance286d32007-09-12 13:53:49 +02005214/**
5215 * dev_change_net_namespace - move device to different nethost namespace
5216 * @dev: device
5217 * @net: network namespace
5218 * @pat: If not NULL name pattern to try if the current device name
5219 * is already taken in the destination network namespace.
5220 *
5221 * This function shuts down a device interface and moves it
5222 * to a new network namespace. On success 0 is returned, on
 5223 * a failure a negative errno code is returned.
5224 *
5225 * Callers must hold the rtnl semaphore.
5226 */
5227
5228int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5229{
5230 char buf[IFNAMSIZ];
5231 const char *destname;
5232 int err;
5233
5234 ASSERT_RTNL();
5235
5236 /* Don't allow namespace local devices to be moved. */
5237 err = -EINVAL;
5238 if (dev->features & NETIF_F_NETNS_LOCAL)
5239 goto out;
5240
Eric W. Biederman38918452008-10-27 17:51:47 -07005241#ifdef CONFIG_SYSFS
5242 /* Don't allow real devices to be moved when sysfs
5243 * is enabled.
5244 */
5245 err = -EINVAL;
5246 if (dev->dev.parent)
5247 goto out;
5248#endif
5249
Eric W. Biedermance286d32007-09-12 13:53:49 +02005250	/* Ensure the device has been registered */
5251 err = -EINVAL;
5252 if (dev->reg_state != NETREG_REGISTERED)
5253 goto out;
5254
 5255	/* Get out if there is nothing to do */
5256 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005257 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005258 goto out;
5259
5260 /* Pick the destination device name, and ensure
5261 * we can use it in the destination network namespace.
5262 */
5263 err = -EEXIST;
5264 destname = dev->name;
5265 if (__dev_get_by_name(net, destname)) {
5266 /* We get here if we can't use the current device name */
5267 if (!pat)
5268 goto out;
5269 if (!dev_valid_name(pat))
5270 goto out;
5271 if (strchr(pat, '%')) {
5272 if (__dev_alloc_name(net, pat, buf) < 0)
5273 goto out;
5274 destname = buf;
5275 } else
5276 destname = pat;
5277 if (__dev_get_by_name(net, destname))
5278 goto out;
5279 }
5280
5281 /*
 5282	 * And now a mini version of register_netdevice and unregister_netdevice.
5283 */
5284
5285 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005286 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005287
5288 /* And unlink it from device chain */
5289 err = -ENODEV;
5290 unlist_netdevice(dev);
5291
5292 synchronize_net();
5293
5294 /* Shutdown queueing discipline. */
5295 dev_shutdown(dev);
5296
 5297	/* Notify protocols that we are about to destroy
 5298	   this device. They should clean up all of their state.
5299 */
5300 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5301
5302 /*
5303 * Flush the unicast and multicast chains
5304 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00005305 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005306 dev_addr_discard(dev);
5307
Eric W. Biederman38918452008-10-27 17:51:47 -07005308 netdev_unregister_kobject(dev);
5309
Eric W. Biedermance286d32007-09-12 13:53:49 +02005310 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005311 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005312
5313 /* Assign the new device name */
5314 if (destname != dev->name)
5315 strcpy(dev->name, destname);
5316
5317 /* If there is an ifindex conflict assign a new one */
5318 if (__dev_get_by_index(net, dev->ifindex)) {
5319 int iflink = (dev->iflink == dev->ifindex);
5320 dev->ifindex = dev_new_index(net);
5321 if (iflink)
5322 dev->iflink = dev->ifindex;
5323 }
5324
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005325 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005326 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005327 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005328
5329 /* Add the device back in the hashes */
5330 list_netdevice(dev);
5331
5332 /* Notify protocols, that a new device appeared. */
5333 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5334
5335 synchronize_net();
5336 err = 0;
5337out:
5338 return err;
5339}
5340
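/*
 * Illustrative sketch, not part of the original file: an in-kernel
 * caller moving a device to another namespace, much as the "dev%d"
 * fallback in default_device_exit() below does.  "target_net" is an
 * assumed, referenced struct net; the rtnl semaphore is taken as the
 * comment above requires.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_move(struct net_device *dev, struct net *target_net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target_net, "moved%d");
	rtnl_unlock();
	return err;
}
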
Linus Torvalds1da177e2005-04-16 15:20:36 -07005341static int dev_cpu_callback(struct notifier_block *nfb,
5342 unsigned long action,
5343 void *ocpu)
5344{
5345 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005346 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005347 struct sk_buff *skb;
5348 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5349 struct softnet_data *sd, *oldsd;
5350
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005351 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005352 return NOTIFY_OK;
5353
5354 local_irq_disable();
5355 cpu = smp_processor_id();
5356 sd = &per_cpu(softnet_data, cpu);
5357 oldsd = &per_cpu(softnet_data, oldcpu);
5358
5359 /* Find end of our completion_queue. */
5360 list_skb = &sd->completion_queue;
5361 while (*list_skb)
5362 list_skb = &(*list_skb)->next;
5363 /* Append completion queue from offline CPU. */
5364 *list_skb = oldsd->completion_queue;
5365 oldsd->completion_queue = NULL;
5366
5367 /* Find end of our output_queue. */
5368 list_net = &sd->output_queue;
5369 while (*list_net)
5370 list_net = &(*list_net)->next_sched;
5371 /* Append output queue from offline CPU. */
5372 *list_net = oldsd->output_queue;
5373 oldsd->output_queue = NULL;
5374
5375 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5376 local_irq_enable();
5377
5378 /* Process offline CPU's input_pkt_queue */
5379 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5380 netif_rx(skb);
5381
5382 return NOTIFY_OK;
5383}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005384
5385
Herbert Xu7f353bf2007-08-10 15:47:58 -07005386/**
Herbert Xub63365a2008-10-23 01:11:29 -07005387 * netdev_increment_features - increment feature set by one
5388 * @all: current feature set
5389 * @one: new feature set
5390 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005391 *
5392 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005393 * @one to the master device with current feature set @all. Will not
5394 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005395 */
Herbert Xub63365a2008-10-23 01:11:29 -07005396unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5397 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005398{
Herbert Xub63365a2008-10-23 01:11:29 -07005399 /* If device needs checksumming, downgrade to it. */
5400 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5401 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5402 else if (mask & NETIF_F_ALL_CSUM) {
5403 /* If one device supports v4/v6 checksumming, set for all. */
5404 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5405 !(all & NETIF_F_GEN_CSUM)) {
5406 all &= ~NETIF_F_ALL_CSUM;
5407 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5408 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005409
Herbert Xub63365a2008-10-23 01:11:29 -07005410 /* If one device supports hw checksumming, set for all. */
5411 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5412 all &= ~NETIF_F_ALL_CSUM;
5413 all |= NETIF_F_HW_CSUM;
5414 }
5415 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005416
Herbert Xub63365a2008-10-23 01:11:29 -07005417 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005418
Herbert Xub63365a2008-10-23 01:11:29 -07005419 one |= all & NETIF_F_ONE_FOR_ALL;
5420 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
5421 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005422
5423 return all;
5424}
Herbert Xub63365a2008-10-23 01:11:29 -07005425EXPORT_SYMBOL(netdev_increment_features);
Herbert Xu7f353bf2007-08-10 15:47:58 -07005426
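/*
 * Illustrative sketch, not part of the original file: how a master
 * device (bonding is the in-tree user) folds slave feature sets
 * together with netdev_increment_features().  The seed and mask below
 * are assumptions; they play the @all and @mask roles described above.
 */
#include <linux/netdevice.h>

static unsigned long example_master_features(struct net_device **slaves,
					     int n)
{
	unsigned long mask = NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_TSO;
	unsigned long all = mask;	/* assumed seed */
	int i;

	for (i = 0; i < n; i++)
		all = netdev_increment_features(all, slaves[i]->features,
						mask);
	return all;
}
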
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005427static struct hlist_head *netdev_create_hash(void)
5428{
5429 int i;
5430 struct hlist_head *hash;
5431
5432 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5433 if (hash != NULL)
5434 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5435 INIT_HLIST_HEAD(&hash[i]);
5436
5437 return hash;
5438}
5439
Eric W. Biederman881d9662007-09-17 11:56:21 -07005440/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005441static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005442{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005443 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005444
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005445 net->dev_name_head = netdev_create_hash();
5446 if (net->dev_name_head == NULL)
5447 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005448
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005449 net->dev_index_head = netdev_create_hash();
5450 if (net->dev_index_head == NULL)
5451 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005452
5453 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005454
5455err_idx:
5456 kfree(net->dev_name_head);
5457err_name:
5458 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005459}
5460
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005461/**
5462 * netdev_drivername - network driver for the device
5463 * @dev: network device
5464 * @buffer: buffer for resulting name
5465 * @len: size of buffer
5466 *
5467 * Determine network driver for device.
5468 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005469char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005470{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005471 const struct device_driver *driver;
5472 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005473
5474 if (len <= 0 || !buffer)
5475 return buffer;
5476 buffer[0] = 0;
5477
5478 parent = dev->dev.parent;
5479
5480 if (!parent)
5481 return buffer;
5482
5483 driver = parent->driver;
5484 if (driver && driver->name)
5485 strlcpy(buffer, driver->name, len);
5486 return buffer;
5487}
5488
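/*
 * Illustrative sketch, not part of the original file: typical diagnostic
 * use of netdev_drivername(), in the style of the tx-timeout watchdog.
 */
#include <linux/netdevice.h>

static void example_report(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_WARNING "%s (%s): example diagnostic\n", dev->name,
	       netdev_drivername(dev, drivername, sizeof(drivername)));
}
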
Pavel Emelyanov46650792007-10-08 20:38:39 -07005489static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005490{
5491 kfree(net->dev_name_head);
5492 kfree(net->dev_index_head);
5493}
5494
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005495static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005496 .init = netdev_init,
5497 .exit = netdev_exit,
5498};
5499
Pavel Emelyanov46650792007-10-08 20:38:39 -07005500static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005501{
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005502 struct net_device *dev;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005503 /*
 5504	 * Push all migratable network devices back to the
5505 * initial network namespace
5506 */
5507 rtnl_lock();
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005508restart:
5509 for_each_netdev(net, dev) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005510 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005511 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005512
 5513		/* Ignore unmovable devices (e.g. loopback) */
5514 if (dev->features & NETIF_F_NETNS_LOCAL)
5515 continue;
5516
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005517 /* Delete virtual devices */
5518 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
5519 dev->rtnl_link_ops->dellink(dev);
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005520 goto restart;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005521 }
5522
Eric W. Biedermance286d32007-09-12 13:53:49 +02005523		/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005524 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5525 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005526 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005527 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005528 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005529 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005530 }
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005531 goto restart;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005532 }
5533 rtnl_unlock();
5534}
5535
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005536static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005537 .exit = default_device_exit,
5538};
5539
Linus Torvalds1da177e2005-04-16 15:20:36 -07005540/*
5541 * Initialize the DEV module. At boot time this walks the device list and
5542 * unhooks any devices that fail to initialise (normally hardware not
5543 * present) and leaves us with a valid list of present and active devices.
5544 *
5545 */
5546
5547/*
5548 * This is called single threaded during boot, so no need
5549 * to take the rtnl semaphore.
5550 */
5551static int __init net_dev_init(void)
5552{
5553 int i, rc = -ENOMEM;
5554
5555 BUG_ON(!dev_boot_phase);
5556
Linus Torvalds1da177e2005-04-16 15:20:36 -07005557 if (dev_proc_init())
5558 goto out;
5559
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005560 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005561 goto out;
5562
5563 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08005564 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005565 INIT_LIST_HEAD(&ptype_base[i]);
5566
Eric W. Biederman881d9662007-09-17 11:56:21 -07005567 if (register_pernet_subsys(&netdev_net_ops))
5568 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005569
5570 /*
5571 * Initialise the packet receive queues.
5572 */
5573
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005574 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005575 struct softnet_data *queue;
5576
5577 queue = &per_cpu(softnet_data, i);
5578 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005579 queue->completion_queue = NULL;
5580 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005581
5582 queue->backlog.poll = process_backlog;
5583 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005584 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005585 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005586 }
5587
Linus Torvalds1da177e2005-04-16 15:20:36 -07005588 dev_boot_phase = 0;
5589
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005590	/* The loopback device is special: if any other network device
 5591	 * is present in a network namespace, the loopback device must be
 5592	 * present too. Since we now dynamically allocate and free the
 5593	 * loopback device, ensure this invariant is maintained by
 5594	 * keeping the loopback device as the first device on the
 5595	 * list of network devices. This ensures the loopback device
 5596	 * is the first device that appears and the last network device
 5597	 * that disappears.
5598 */
5599 if (register_pernet_device(&loopback_net_ops))
5600 goto out;
5601
5602 if (register_pernet_device(&default_device_ops))
5603 goto out;
5604
Carlos R. Mafra962cf362008-05-15 11:15:37 -03005605 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5606 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005607
5608 hotcpu_notifier(dev_cpu_callback, 0);
5609 dst_init();
5610 dev_mcast_init();
5611 rc = 0;
5612out:
5613 return rc;
5614}
5615
5616subsys_initcall(net_dev_init);
5617
Krishna Kumare88721f2009-02-18 17:55:02 -08005618static int __init initialize_hashrnd(void)
5619{
5620 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5621 return 0;
5622}
5623
5624late_initcall_sync(initialize_hashrnd);
5625
Linus Torvalds1da177e2005-04-16 15:20:36 -07005626EXPORT_SYMBOL(__dev_get_by_index);
5627EXPORT_SYMBOL(__dev_get_by_name);
5628EXPORT_SYMBOL(__dev_remove_pack);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08005629EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005630EXPORT_SYMBOL(dev_add_pack);
5631EXPORT_SYMBOL(dev_alloc_name);
5632EXPORT_SYMBOL(dev_close);
5633EXPORT_SYMBOL(dev_get_by_flags);
5634EXPORT_SYMBOL(dev_get_by_index);
5635EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005636EXPORT_SYMBOL(dev_open);
5637EXPORT_SYMBOL(dev_queue_xmit);
5638EXPORT_SYMBOL(dev_remove_pack);
5639EXPORT_SYMBOL(dev_set_allmulti);
5640EXPORT_SYMBOL(dev_set_promiscuity);
5641EXPORT_SYMBOL(dev_change_flags);
5642EXPORT_SYMBOL(dev_set_mtu);
5643EXPORT_SYMBOL(dev_set_mac_address);
5644EXPORT_SYMBOL(free_netdev);
5645EXPORT_SYMBOL(netdev_boot_setup_check);
5646EXPORT_SYMBOL(netdev_set_master);
5647EXPORT_SYMBOL(netdev_state_change);
5648EXPORT_SYMBOL(netif_receive_skb);
5649EXPORT_SYMBOL(netif_rx);
5650EXPORT_SYMBOL(register_gifconf);
5651EXPORT_SYMBOL(register_netdevice);
5652EXPORT_SYMBOL(register_netdevice_notifier);
5653EXPORT_SYMBOL(skb_checksum_help);
5654EXPORT_SYMBOL(synchronize_net);
5655EXPORT_SYMBOL(unregister_netdevice);
5656EXPORT_SYMBOL(unregister_netdevice_notifier);
5657EXPORT_SYMBOL(net_enable_timestamp);
5658EXPORT_SYMBOL(net_disable_timestamp);
5659EXPORT_SYMBOL(dev_get_flags);
5660
5661#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
5662EXPORT_SYMBOL(br_handle_frame_hook);
5663EXPORT_SYMBOL(br_fdb_get_hook);
5664EXPORT_SYMBOL(br_fdb_put_hook);
5665#endif
5666
Linus Torvalds1da177e2005-04-16 15:20:36 -07005667EXPORT_SYMBOL(dev_load);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005668
5669EXPORT_PER_CPU_SYMBOL(softnet_data);