/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

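/*
 * Usage sketch (illustrative only, not part of the original source): a
 * pure reader walks the device list under dev_base_lock, e.g.:
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		inspect(dev);		// inspect() is a hypothetical helper
 *	read_unlock(&dev_base_lock);
 *
 * Writers additionally hold the rtnl semaphore, per the rules above.
 */
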
#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and should be copied-on-write; it would change the packet
 *	and subsequent readers would see it broken.
 *	--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

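/*
 * Usage sketch (illustrative only, not part of the original source): a
 * protocol module pairs dev_add_pack() with dev_remove_pack(), assuming
 * a hypothetical receive handler my_rcv() with the packet_type ->func
 * signature:
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev);
 *
 *	static struct packet_type my_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);		// module init
 *	dev_remove_pack(&my_packet_type);	// module exit
 */
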
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

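/*
 * Usage sketch (illustrative only, not part of the original source):
 * following the option parsing above (up to four integers, then the
 * device name), a kernel command line entry such as
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * records IRQ 5 and I/O base 0x340 for eth0, to be picked up later by
 * netdev_boot_setup_check() during probing.
 */
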
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

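/*
 * Usage sketch (illustrative only, not part of the original source):
 * callers of the reference-taking lookup must drop the reference
 * themselves once done:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		//... use dev ...
 *		dev_put(dev);
 *	}
 */
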
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

873/**
874 * dev_change_name - change name of a device
875 * @dev: device
876 * @newname: name (or format string) must be at least IFNAMSIZ
877 *
878 * Change name of a device, can pass format strings "eth%d".
879 * for wildcarding.
880 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -0700881int dev_change_name(struct net_device *dev, const char *newname)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700882{
Herbert Xufcc5a032007-07-30 17:03:38 -0700883 char oldname[IFNAMSIZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700884 int err = 0;
Herbert Xufcc5a032007-07-30 17:03:38 -0700885 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -0700886 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700887
888 ASSERT_RTNL();
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900889 BUG_ON(!dev_net(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700890
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900891 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892 if (dev->flags & IFF_UP)
893 return -EBUSY;
894
895 if (!dev_valid_name(newname))
896 return -EINVAL;
897
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -0700898 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
899 return 0;
900
Herbert Xufcc5a032007-07-30 17:03:38 -0700901 memcpy(oldname, dev->name, IFNAMSIZ);
902
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903 if (strchr(newname, '%')) {
904 err = dev_alloc_name(dev, newname);
905 if (err < 0)
906 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907 }
Eric W. Biederman881d9662007-09-17 11:56:21 -0700908 else if (__dev_get_by_name(net, newname))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909 return -EEXIST;
910 else
911 strlcpy(dev->name, newname, IFNAMSIZ);
912
Herbert Xufcc5a032007-07-30 17:03:38 -0700913rollback:
Eric W. Biederman38918452008-10-27 17:51:47 -0700914 /* For now only devices in the initial network namespace
915 * are in sysfs.
916 */
917 if (net == &init_net) {
918 ret = device_rename(&dev->dev, dev->name);
919 if (ret) {
920 memcpy(dev->name, oldname, IFNAMSIZ);
921 return ret;
922 }
Stephen Hemmingerdcc99772008-05-14 22:33:38 -0700923 }
Herbert Xu7f988ea2007-07-30 16:35:46 -0700924
925 write_lock_bh(&dev_base_lock);
Eric W. Biederman92749822007-04-03 00:07:30 -0600926 hlist_del(&dev->name_hlist);
Eric W. Biederman881d9662007-09-17 11:56:21 -0700927 hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
Herbert Xu7f988ea2007-07-30 16:35:46 -0700928 write_unlock_bh(&dev_base_lock);
929
Pavel Emelyanov056925a2007-09-16 15:42:43 -0700930 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -0700931 ret = notifier_to_errno(ret);
932
933 if (ret) {
934 if (err) {
935 printk(KERN_ERR
936 "%s: name change rollback failed: %d.\n",
937 dev->name, ret);
938 } else {
939 err = ret;
940 memcpy(dev->name, oldname, IFNAMSIZ);
941 goto rollback;
942 }
943 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944
945 return err;
946}
947
948/**
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700949 * dev_set_alias - change ifalias of a device
950 * @dev: device
951 * @alias: name up to IFALIASZ
Stephen Hemmingerf0db2752008-09-30 02:23:58 -0700952 * @len: limit of bytes to copy from info
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700953 *
954 * Set ifalias for a device,
955 */
956int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
957{
958 ASSERT_RTNL();
959
960 if (len >= IFALIASZ)
961 return -EINVAL;
962
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -0700963 if (!len) {
964 if (dev->ifalias) {
965 kfree(dev->ifalias);
966 dev->ifalias = NULL;
967 }
968 return 0;
969 }
970
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700971 dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
972 if (!dev->ifalias)
973 return -ENOMEM;
974
975 strlcpy(dev->ifalias, alias, len+1);
976 return len;
977}
978
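/*
 * Usage sketch (illustrative only, not part of the original source):
 * rtnetlink invokes dev_set_alias() under RTNL when userspace sets an
 * interface alias, roughly:
 *
 *	rtnl_lock();
 *	err = dev_set_alias(dev, "uplink to core switch", len);
 *	rtnl_unlock();
 */
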

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

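/*
 * Usage sketch (illustrative only, not part of the original source):
 * bringing an interface up and down from kernel code happens under RTNL,
 * matching the ASSERT_RTNL() checks in dev_open()/dev_close():
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	//...
 *	dev_close(dev);
 *	rtnl_unlock();
 */
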
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);

Eric W. Biederman881d9662007-09-17 11:56:21 -07001205static int dev_boot_phase = 1;
1206
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207/*
1208 * Device change register/unregister. These are not inline or static
1209 * as we export them to the world.
1210 */
1211
1212/**
1213 * register_netdevice_notifier - register a network notifier block
1214 * @nb: notifier
1215 *
1216 * Register a notifier to be called when network device events occur.
1217 * The notifier passed is linked into the kernel structures and must
1218 * not be reused until it has been unregistered. A negative errno code
1219 * is returned on a failure.
1220 *
1221 * When registered all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001222 * to the new notifier to allow device to have a race free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 * view of the network device list.
1224 */
1225
1226int register_netdevice_notifier(struct notifier_block *nb)
1227{
1228 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001229 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001230 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 int err;
1232
1233 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001234 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001235 if (err)
1236 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001237 if (dev_boot_phase)
1238 goto unlock;
1239 for_each_net(net) {
1240 for_each_netdev(net, dev) {
1241 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1242 err = notifier_to_errno(err);
1243 if (err)
1244 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245
Eric W. Biederman881d9662007-09-17 11:56:21 -07001246 if (!(dev->flags & IFF_UP))
1247 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001248
Eric W. Biederman881d9662007-09-17 11:56:21 -07001249 nb->notifier_call(nb, NETDEV_UP, dev);
1250 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001252
1253unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 rtnl_unlock();
1255 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001256
1257rollback:
1258 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001259 for_each_net(net) {
1260 for_each_netdev(net, dev) {
1261 if (dev == last)
1262 break;
Herbert Xufcc5a032007-07-30 17:03:38 -07001263
Eric W. Biederman881d9662007-09-17 11:56:21 -07001264 if (dev->flags & IFF_UP) {
1265 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1266 nb->notifier_call(nb, NETDEV_DOWN, dev);
1267 }
1268 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001269 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001270 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001271
1272 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001273 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274}
1275
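/*
 * Usage sketch (illustrative only, not part of the original source): a
 * subsystem watches device events with a notifier block, assuming a
 * hypothetical handler my_netdev_event():
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_notifier);
 */
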
1276/**
1277 * unregister_netdevice_notifier - unregister a network notifier block
1278 * @nb: notifier
1279 *
1280 * Unregister a notifier previously registered by
1281 * register_netdevice_notifier(). The notifier is unlinked into the
1282 * kernel structures and may then be reused. A negative errno code
1283 * is returned on a failure.
1284 */
1285
1286int unregister_netdevice_notifier(struct notifier_block *nb)
1287{
Herbert Xu9f514952006-03-25 01:24:25 -08001288 int err;
1289
1290 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001291 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001292 rtnl_unlock();
1293 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294}
1295
1296/**
1297 * call_netdevice_notifiers - call all network notifier blocks
1298 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001299 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 *
1301 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001302 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 */
1304
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 * Support routine. Sends outgoing frames to any network
 * taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp(skb);
#else
	net_timestamp(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by the sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
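
/*
 * Illustrative sketch: a driver's TX-completion handler typically uses
 * dev_kfree_skb_any() because completion may run in hard-IRQ or process
 * context.  The mydrv_* names below are hypothetical.
 */
#if 0	/* example only, not compiled */
static void mydrv_clean_tx_ring(struct mydrv_priv *priv)
{
	struct sk_buff *skb;

	/* Reclaim every skb the hardware has finished transmitting. */
	while ((skb = mydrv_reclaim_done_descriptor(priv)) != NULL)
		dev_kfree_skb_any(skb);	/* safe in any context */
}
#endif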


/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
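
/*
 * Illustrative sketch: a PCI driver's suspend/resume path is the usual
 * caller of netif_device_detach()/netif_device_attach().  The mydrv_*
 * names are hypothetical.
 */
#if 0	/* example only, not compiled */
static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_detach(netdev);	/* stop all TX queues */
	mydrv_hw_power_down(netdev);
	return 0;
}

static int mydrv_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	mydrv_hw_power_up(netdev);
	netif_device_attach(netdev);	/* wake queues, restart watchdog */
	return 0;
}
#endif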

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

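/*
 * Illustrative sketch of the feature math above: a device advertising
 * only NETIF_F_IP_CSUM can checksum plain IPv4, and VLAN-tagged IPv4
 * only if the flag is also present in dev->vlan_features.  The helper
 * below is a hypothetical caller, not part of the mainline flow.
 */
#if 0	/* example only, not compiled */
static void mydrv_prepare_csum(struct net_device *dev, struct sk_buff *skb)
{
	if (dev_can_checksum(dev, skb))
		;			/* leave CHECKSUM_PARTIAL to hardware */
	else
		skb_checksum_help(skb);	/* fall back to software checksum */
}
#endif
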
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
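
/*
 * Worked example of the offset arithmetic above, assuming UDP over IPv4
 * with a 14-byte Ethernet header and a 20-byte IP header (numbers are
 * purely illustrative): csum_start points at the UDP header, so
 *   offset = skb->csum_start - skb_headroom(skb)  == 34 from skb->data,
 * and the folded sum is stored at
 *   offset + skb->csum_offset                     == 34 + 6 == 40,
 * i.e. the "check" field of the UDP header.
 */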

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc;

	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		/*
		 * If the device doesn't need skb->dst, release it right now
		 * while it is still hot in this CPU's cache.
		 */
		if ((dev->priv_flags & IFF_XMIT_DST_RELEASE) && skb->dst) {
			dst_release(skb->dst);
			skb->dst = NULL;
		}
		rc = ops->ndo_start_xmit(skb, dev);
		if (rc == 0)
			txq_trans_update(txq);
		/*
		 * TODO: if skb_orphan() was called by
		 * dev->hard_start_xmit() (for example, the unmodified
		 * igb driver does that; bnx2 doesn't), then
		 * skb_tx_software_timestamp() will be unable to send
		 * back the time stamp.
		 *
		 * How can this be prevented? Always create another
		 * reference to the socket before calling
		 * dev->hard_start_xmit()? Prevent that skb_orphan()
		 * does anything in dev->hard_start_xmit() by clearing
		 * the skb destructor before the call and restoring it
		 * afterwards, then doing the skb_orphan() ourselves?
		 */
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}

static u32 skb_tx_hashrnd;

u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= dev->real_num_tx_queues))
			hash -= dev->real_num_tx_queues;
		return hash;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = skb->protocol;

	hash = jhash_1word(hash, skb_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
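
/*
 * Worked example of the final multiply-shift above: it maps a 32-bit
 * hash uniformly onto [0, real_num_tx_queues) without a modulo.  For
 * instance, with 4 TX queues and hash == 0x80000000:
 *   ((u64)0x80000000 * 4) >> 32 == 2,
 * so the packet lands on queue 2; hashes below 0x40000000 land on
 * queue 0, and so on in equal-sized bands.
 */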

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index = 0;

	if (ops->ndo_select_queue)
		queue_index = ops->ndo_select_queue(dev, skb);
	else if (dev->real_num_tx_queues > 1)
		queue_index = skb_tx_hash(dev, skb);

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* A fragmented skb is linearized if the device does not support SG,
	 * or if at least one of the fragments is in highmem and the device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If the packet is not checksummed and the device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		spinlock_t *root_lock = qdisc_lock(q);

		spin_lock(root_lock);

		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			kfree_skb(skb);
			rc = NET_XMIT_DROP;
		} else {
			rc = qdisc_enqueue_root(skb, q);
			qdisc_run(q);
		}
		spin_unlock(root_lock);

		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and take the lock. It is not prone to deadlocks.
	   Or shortcut the noqueue qdisc; it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
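
/*
 * Illustrative sketch: a typical in-kernel sender builds an skb, sets
 * skb->dev, and hands it to dev_queue_xmit().  Error handling is
 * minimal, the helper name is hypothetical, and the protocol choice is
 * purely illustrative.
 */
#if 0	/* example only, not compiled */
static int mymod_send_frame(struct net_device *dev, const void *payload,
			    unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + LL_RESERVED_SPACE(dev),
					GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), payload, len);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_802_2);

	/* The skb is consumed whatever happens; no retry with this skb. */
	return dev_queue_xmit(skb);
}
#endif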


/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };


/**
 *	netif_rx - post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the shortest
	 * when the CPU is congested, but it is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);
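
/*
 * Illustrative sketch: a legacy (non-NAPI) driver calls netif_rx() from
 * its RX interrupt handler; netif_rx_ni() is the process-context
 * variant.  The mydrv_* names are hypothetical.
 */
#if 0	/* example only, not compiled */
static irqreturn_t mydrv_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sk_buff *skb = mydrv_build_rx_skb(dev);

	if (skb) {
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);		/* queue to the per-CPU backlog */
	}
	return IRQ_HANDLED;
}
#endif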

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
/* These hooks are defined here for ATM. */
struct net_bridge;
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr);
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If the bridge module is loaded, call the bridging hook.
 * Returns NULL if the packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is?  Otherwise we pay a few useless
 * instructions (a compare and two stores) right now if we don't have
 * it on but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected, dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/*
 * netif_nit_deliver - deliver received packets to network taps
 * @skb: buffer
 *
 * This function is used to deliver incoming packets to network
 * taps. It should be used when the normal netif_receive_skb path
 * is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	struct net_device *null_or_orig;
	int ret = NET_RX_DROP;
	__be16 type;

	if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
		return NET_RX_SUCCESS;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (!skb->iif)
		skb->iif = skb->dev->ifindex;

	null_or_orig = NULL;
	orig_dev = skb->dev;
	if (orig_dev->master) {
		if (skb_bond_should_drop(skb))
			null_or_orig = orig_dev; /* deliver only exact match */
		else
			skb->dev = orig_dev->master;
	}

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	skb_orphan(skb);

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
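
/*
 * Illustrative sketch: a NAPI driver's ->poll() callback feeds received
 * frames to netif_receive_skb() from softirq context.  The mydrv_*
 * names are hypothetical.
 */
#if 0	/* example only, not compiled */
static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = mydrv_next_rx_skb(priv);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, priv->netdev);
		netif_receive_skb(skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete(napi);	/* then re-enable device interrupts */
	return work_done;
}
#endif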

/* Network device is going away, flush any packets still pending */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
		if (skb->dev == dev) {
			__skb_unlink(skb, &queue->input_pkt_queue);
			kfree_skb(skb);
		}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

void napi_gro_flush(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		napi_gro_complete(skb);
	}

	napi->gro_count = 0;
	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int same_flow;
	int mac_len;
	int ret;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_shinfo(skb)->frags[0].size -= grow;

		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
			put_page(skb_shinfo(skb)->frags[0].page);
			/* memmove() takes a byte count, so the remaining
			 * fragment count must be scaled by the descriptor
			 * size.
			 */
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags *
				sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);

static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;

	if (netpoll_rx_on(skb))
		return GRO_NORMAL;

	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
			&& !compare_ether_header(skb_mac_header(p),
						 skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);
}

int napi_skb_finish(int ret, struct sk_buff *skb)
{
	int err = NET_RX_SUCCESS;

	switch (ret) {
	case GRO_NORMAL:
		return netif_receive_skb(skb);

	case GRO_DROP:
		err = NET_RX_DROP;
		/* fall through */

	case GRO_MERGED_FREE:
		kfree_skb(skb);
		break;
	}

	return err;
}
EXPORT_SYMBOL(napi_skb_finish);

void skb_gro_reset_offset(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb->mac_header == skb->tail &&
	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
		NAPI_GRO_CB(skb)->frag0 =
			page_address(skb_shinfo(skb)->frags[0].page) +
			skb_shinfo(skb)->frags[0].page_offset;
		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
	}
}
EXPORT_SYMBOL(skb_gro_reset_offset);

int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
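
/*
 * Illustrative note: a GRO-capable driver keeps the same ->poll() shape
 * as a plain NAPI driver, but hands frames to napi_gro_receive() so
 * that same-flow segments can be merged before the protocol layers see
 * them.  The two lines below are the only change to the earlier sketch:
 */
#if 0	/* example only, not compiled */
	skb->protocol = eth_type_trans(skb, priv->netdev);
	napi_gro_receive(napi, skb);	/* instead of netif_receive_skb() */
#endif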

void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));

	napi->skb = skb;
}
EXPORT_SYMBOL(napi_reuse_skb);

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
		if (!skb)
			goto out;

		skb_reserve(skb, NET_IP_ALIGN);

		napi->skb = skb;
	}

out:
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
{
	int err = NET_RX_SUCCESS;

	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, napi->dev);

		if (ret == GRO_NORMAL)
			return netif_receive_skb(skb);

		skb_gro_pull(skb, -ETH_HLEN);
		break;

	case GRO_DROP:
		err = NET_RX_DROP;
		/* fall through */

	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;
	}

	return err;
}
EXPORT_SYMBOL(napi_frags_finish);

struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}
EXPORT_SYMBOL(napi_frags_skb);

int napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return NET_RX_DROP;

	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
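
/*
 * Illustrative sketch: a driver that receives directly into pages uses
 * the napi_get_frags()/napi_gro_frags() pair; the core then recovers
 * the Ethernet header itself in napi_frags_skb().  The mydrv_* name is
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static void mydrv_rx_page(struct napi_struct *napi, struct page *page,
			  unsigned int off, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(page);		/* drop: no skb available */
		return;
	}

	skb_fill_page_desc(skb, 0, page, off, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	napi_gro_frags(napi);		/* consumes or recycles napi->skb */
}
#endif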

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	unsigned long start_time = jiffies;

	napi->weight = weight_p;
	do {
		struct sk_buff *skb;

		local_irq_disable();
		skb = __skb_dequeue(&queue->input_pkt_queue);
		if (!skb) {
			__napi_complete(napi);
			local_irq_enable();
			break;
		}
		local_irq_enable();

		netif_receive_skb(skb);
	} while (++work < quota && jiffies == start_time);

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * Don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu.
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
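
/*
 * Illustrative sketch: netif_napi_add() is normally called once at
 * probe time, before register_netdev(); a weight of 64 is the common
 * choice for Ethernet drivers.  The mydrv_* names are hypothetical.
 */
#if 0	/* example only, not compiled */
static int mydrv_probe_napi(struct net_device *netdev)
{
	struct mydrv_priv *priv = netdev_priv(netdev);

	netif_napi_add(netdev, &priv->napi, mydrv_poll, 64);
	return register_netdev(netdev);
}
#endif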
2741
2742void netif_napi_del(struct napi_struct *napi)
2743{
2744 struct sk_buff *skb, *next;
2745
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002746 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002747 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002748
2749 for (skb = napi->gro_list; skb; skb = next) {
2750 next = skb->next;
2751 skb->next = NULL;
2752 kfree_skb(skb);
2753 }
2754
2755 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002756 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002757}
2758EXPORT_SYMBOL(netif_napi_del);
2759
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002760
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761static void net_rx_action(struct softirq_action *h)
2762{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002763 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002764 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002765 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002766 void *have;
2767
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 local_irq_disable();
2769
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002770 while (!list_empty(list)) {
2771 struct napi_struct *n;
2772 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002774		/* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002775		 * Allow this to run for 2 jiffies, which allows
2776		 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002777 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002778 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 goto softnet_break;
2780
2781 local_irq_enable();
2782
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002783 /* Even though interrupts have been re-enabled, this
2784 * access is safe because interrupts can only add new
2785 * entries to the tail of this list, and only ->poll()
2786 * calls can remove this head entry from the list.
2787 */
2788 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002790 have = netpoll_poll_lock(n);
2791
2792 weight = n->weight;
2793
David S. Miller0a7606c2007-10-29 21:28:47 -07002794 /* This NAPI_STATE_SCHED test is for avoiding a race
2795 * with netpoll's poll_napi(). Only the entity which
2796 * obtains the lock and sees NAPI_STATE_SCHED set will
2797 * actually make the ->poll() call. Therefore we avoid
2798		 * accidentally calling ->poll() when NAPI is not scheduled.
2799 */
2800 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002801 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002802 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002803 trace_napi_poll(n);
2804 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002805
2806 WARN_ON_ONCE(work > weight);
2807
2808 budget -= work;
2809
2810 local_irq_disable();
2811
2812 /* Drivers must not modify the NAPI state if they
2813 * consume the entire weight. In such cases this code
2814 * still "owns" the NAPI instance and therefore can
2815 * move the instance around on the list at-will.
2816 */
David S. Millerfed17f32008-01-07 21:00:40 -08002817 if (unlikely(work == weight)) {
2818 if (unlikely(napi_disable_pending(n)))
2819 __napi_complete(n);
2820 else
2821 list_move_tail(&n->poll_list, list);
2822 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002823
2824 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 }
2826out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002827 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002828
Chris Leechdb217332006-06-17 21:24:58 -07002829#ifdef CONFIG_NET_DMA
2830 /*
2831 * There may not be any more sk_buffs coming right now, so push
2832 * any pending DMA copies to hardware
2833 */
Dan Williams2ba05622009-01-06 11:38:14 -07002834 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002835#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002836
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 return;
2838
2839softnet_break:
2840 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2841 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2842 goto out;
2843}
2844
2845static gifconf_func_t *gifconf_list[NPROTO];
2846
2847/**
2848 * register_gifconf - register a SIOCGIFCONF handler
2849 * @family: Address family
2850 * @gifconf: Function handler
2851 *
2852 * Register protocol dependent address dumping routines. The handler
2853 * that is passed must not be freed or reused until it has been replaced
2854 * by another handler.
2855 */
2856int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2857{
2858 if (family >= NPROTO)
2859 return -EINVAL;
2860 gifconf_list[family] = gifconf;
2861 return 0;
2862}
2863
2864
2865/*
2866 * Map an interface index to its name (SIOCGIFNAME)
2867 */
2868
2869/*
2870 * We need this ioctl for efficient implementation of the
2871 * if_indextoname() function required by the IPv6 API. Without
2872 * it, we would have to search all the interfaces to find a
2873 * match. --pb
2874 */
2875
Eric W. Biederman881d9662007-09-17 11:56:21 -07002876static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877{
2878 struct net_device *dev;
2879 struct ifreq ifr;
2880
2881 /*
2882 * Fetch the caller's info block.
2883 */
2884
2885 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2886 return -EFAULT;
2887
2888 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002889 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 if (!dev) {
2891 read_unlock(&dev_base_lock);
2892 return -ENODEV;
2893 }
2894
2895 strcpy(ifr.ifr_name, dev->name);
2896 read_unlock(&dev_base_lock);
2897
2898 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2899 return -EFAULT;
2900 return 0;
2901}
2902
2903/*
2904 * Perform a SIOCGIFCONF call. This structure will change
2905 * size eventually, and there is nothing I can do about it.
2906 * Thus we will need a 'compatibility mode'.
2907 */
2908
Eric W. Biederman881d9662007-09-17 11:56:21 -07002909static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910{
2911 struct ifconf ifc;
2912 struct net_device *dev;
2913 char __user *pos;
2914 int len;
2915 int total;
2916 int i;
2917
2918 /*
2919 * Fetch the caller's info block.
2920 */
2921
2922 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2923 return -EFAULT;
2924
2925 pos = ifc.ifc_buf;
2926 len = ifc.ifc_len;
2927
2928 /*
2929 * Loop over the interfaces, and write an info block for each.
2930 */
2931
2932 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002933 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934 for (i = 0; i < NPROTO; i++) {
2935 if (gifconf_list[i]) {
2936 int done;
2937 if (!pos)
2938 done = gifconf_list[i](dev, NULL, 0);
2939 else
2940 done = gifconf_list[i](dev, pos + total,
2941 len - total);
2942 if (done < 0)
2943 return -EFAULT;
2944 total += done;
2945 }
2946 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002947 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948
2949 /*
2950 * All done. Write the updated control block back to the caller.
2951 */
2952 ifc.ifc_len = total;
2953
2954 /*
2955 * Both BSD and Solaris return 0 here, so we do too.
2956 */
2957 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2958}
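/*
 * Userspace counterpart (illustrative only): the classic fixed-buffer
 * way to consume SIOCGIFCONF, which lands in dev_ifconf() above.
 * Error handling is trimmed for brevity.
 *
 *	struct ifreq reqs[32];
 *	struct ifconf ifc;
 *	int i, fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	ifc.ifc_len = sizeof(reqs);
 *	ifc.ifc_req = reqs;
 *	if (fd >= 0 && ioctl(fd, SIOCGIFCONF, &ifc) == 0)
 *		for (i = 0; i < ifc.ifc_len / sizeof(struct ifreq); i++)
 *			printf("%s\n", reqs[i].ifr_name);
 */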
2959
2960#ifdef CONFIG_PROC_FS
2961/*
2962 * This is invoked by the /proc filesystem handler to display a device
2963 * in detail.
2964 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002966 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967{
Denis V. Luneve372c412007-11-19 22:31:54 -08002968 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002969 loff_t off;
2970 struct net_device *dev;
2971
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002973 if (!*pos)
2974 return SEQ_START_TOKEN;
2975
2976 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002977 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002978 if (off++ == *pos)
2979 return dev;
2980
2981 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982}
2983
2984void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2985{
Denis V. Luneve372c412007-11-19 22:31:54 -08002986 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002987 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002988 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002989 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990}
2991
2992void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002993 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994{
2995 read_unlock(&dev_base_lock);
2996}
2997
2998static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2999{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003000 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001
Rusty Russell5a1b5892007-04-28 21:04:03 -07003002 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3003 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3004 dev->name, stats->rx_bytes, stats->rx_packets,
3005 stats->rx_errors,
3006 stats->rx_dropped + stats->rx_missed_errors,
3007 stats->rx_fifo_errors,
3008 stats->rx_length_errors + stats->rx_over_errors +
3009 stats->rx_crc_errors + stats->rx_frame_errors,
3010 stats->rx_compressed, stats->multicast,
3011 stats->tx_bytes, stats->tx_packets,
3012 stats->tx_errors, stats->tx_dropped,
3013 stats->tx_fifo_errors, stats->collisions,
3014 stats->tx_carrier_errors +
3015 stats->tx_aborted_errors +
3016 stats->tx_window_errors +
3017 stats->tx_heartbeat_errors,
3018 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019}
3020
3021/*
3022 * Called from the PROCfs module. This now uses the new arbitrary sized
3023 * /proc/net interface to create /proc/net/dev
3024 */
3025static int dev_seq_show(struct seq_file *seq, void *v)
3026{
3027 if (v == SEQ_START_TOKEN)
3028 seq_puts(seq, "Inter-| Receive "
3029 " | Transmit\n"
3030 " face |bytes packets errs drop fifo frame "
3031 "compressed multicast|bytes packets errs "
3032 "drop fifo colls carrier compressed\n");
3033 else
3034 dev_seq_printf_stats(seq, v);
3035 return 0;
3036}
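/*
 * The resulting /proc/net/dev looks like this (values illustrative):
 *
 *	Inter-|   Receive                                                |  Transmit
 *	 face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed
 *	    lo:   10219     112    0    0    0     0          0         0    10219     112    0    0    0     0       0          0
 */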
3037
3038static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3039{
3040 struct netif_rx_stats *rc = NULL;
3041
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003042 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003043 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 rc = &per_cpu(netdev_rx_stat, *pos);
3045 break;
3046 } else
3047 ++*pos;
3048 return rc;
3049}
3050
3051static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3052{
3053 return softnet_get_online(pos);
3054}
3055
3056static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3057{
3058 ++*pos;
3059 return softnet_get_online(pos);
3060}
3061
3062static void softnet_seq_stop(struct seq_file *seq, void *v)
3063{
3064}
3065
3066static int softnet_seq_show(struct seq_file *seq, void *v)
3067{
3068 struct netif_rx_stats *s = v;
3069
3070 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003071 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003072 0, 0, 0, 0, /* was fastroute */
3073		   s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003074 return 0;
3075}
3076
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003077static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078 .start = dev_seq_start,
3079 .next = dev_seq_next,
3080 .stop = dev_seq_stop,
3081 .show = dev_seq_show,
3082};
3083
3084static int dev_seq_open(struct inode *inode, struct file *file)
3085{
Denis V. Luneve372c412007-11-19 22:31:54 -08003086 return seq_open_net(inode, file, &dev_seq_ops,
3087 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088}
3089
Arjan van de Ven9a321442007-02-12 00:55:35 -08003090static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 .owner = THIS_MODULE,
3092 .open = dev_seq_open,
3093 .read = seq_read,
3094 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003095 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096};
3097
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003098static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099 .start = softnet_seq_start,
3100 .next = softnet_seq_next,
3101 .stop = softnet_seq_stop,
3102 .show = softnet_seq_show,
3103};
3104
3105static int softnet_seq_open(struct inode *inode, struct file *file)
3106{
3107 return seq_open(file, &softnet_seq_ops);
3108}
3109
Arjan van de Ven9a321442007-02-12 00:55:35 -08003110static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003111 .owner = THIS_MODULE,
3112 .open = softnet_seq_open,
3113 .read = seq_read,
3114 .llseek = seq_lseek,
3115 .release = seq_release,
3116};
3117
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003118static void *ptype_get_idx(loff_t pos)
3119{
3120 struct packet_type *pt = NULL;
3121 loff_t i = 0;
3122 int t;
3123
3124 list_for_each_entry_rcu(pt, &ptype_all, list) {
3125 if (i == pos)
3126 return pt;
3127 ++i;
3128 }
3129
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003130 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003131 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3132 if (i == pos)
3133 return pt;
3134 ++i;
3135 }
3136 }
3137 return NULL;
3138}
3139
3140static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003141 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003142{
3143 rcu_read_lock();
3144 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3145}
3146
3147static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3148{
3149 struct packet_type *pt;
3150 struct list_head *nxt;
3151 int hash;
3152
3153 ++*pos;
3154 if (v == SEQ_START_TOKEN)
3155 return ptype_get_idx(0);
3156
3157 pt = v;
3158 nxt = pt->list.next;
3159 if (pt->type == htons(ETH_P_ALL)) {
3160 if (nxt != &ptype_all)
3161 goto found;
3162 hash = 0;
3163 nxt = ptype_base[0].next;
3164 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003165 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003166
3167 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003168 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003169 return NULL;
3170 nxt = ptype_base[hash].next;
3171 }
3172found:
3173 return list_entry(nxt, struct packet_type, list);
3174}
3175
3176static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003177 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003178{
3179 rcu_read_unlock();
3180}
3181
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003182static int ptype_seq_show(struct seq_file *seq, void *v)
3183{
3184 struct packet_type *pt = v;
3185
3186 if (v == SEQ_START_TOKEN)
3187 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003188 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003189 if (pt->type == htons(ETH_P_ALL))
3190 seq_puts(seq, "ALL ");
3191 else
3192 seq_printf(seq, "%04x", ntohs(pt->type));
3193
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003194 seq_printf(seq, " %-8s %pF\n",
3195 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003196 }
3197
3198 return 0;
3199}
3200
3201static const struct seq_operations ptype_seq_ops = {
3202 .start = ptype_seq_start,
3203 .next = ptype_seq_next,
3204 .stop = ptype_seq_stop,
3205 .show = ptype_seq_show,
3206};
3207
3208static int ptype_seq_open(struct inode *inode, struct file *file)
3209{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003210 return seq_open_net(inode, file, &ptype_seq_ops,
3211 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003212}
3213
3214static const struct file_operations ptype_seq_fops = {
3215 .owner = THIS_MODULE,
3216 .open = ptype_seq_open,
3217 .read = seq_read,
3218 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003219 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003220};
3221
3222
Pavel Emelyanov46650792007-10-08 20:38:39 -07003223static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224{
3225 int rc = -ENOMEM;
3226
Eric W. Biederman881d9662007-09-17 11:56:21 -07003227 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003228 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003229 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003231 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003232 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003233
Eric W. Biederman881d9662007-09-17 11:56:21 -07003234 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003235 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236 rc = 0;
3237out:
3238 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003239out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003240 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003242 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003244 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245 goto out;
3246}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003247
Pavel Emelyanov46650792007-10-08 20:38:39 -07003248static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003249{
3250 wext_proc_exit(net);
3251
3252 proc_net_remove(net, "ptype");
3253 proc_net_remove(net, "softnet_stat");
3254 proc_net_remove(net, "dev");
3255}
3256
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003257static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003258 .init = dev_proc_net_init,
3259 .exit = dev_proc_net_exit,
3260};
3261
3262static int __init dev_proc_init(void)
3263{
3264 return register_pernet_subsys(&dev_proc_ops);
3265}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266#else
3267#define dev_proc_init() 0
3268#endif /* CONFIG_PROC_FS */
3269
3270
3271/**
3272 * netdev_set_master - set up master/slave pair
3273 * @slave: slave device
3274 * @master: new master device
3275 *
3276 * Changes the master device of the slave. Pass %NULL to break the
3277 * bonding. The caller must hold the RTNL semaphore. On a failure
3278 * a negative errno code is returned. On success the reference counts
3279 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3280 * function returns zero.
3281 */
3282int netdev_set_master(struct net_device *slave, struct net_device *master)
3283{
3284 struct net_device *old = slave->master;
3285
3286 ASSERT_RTNL();
3287
3288 if (master) {
3289 if (old)
3290 return -EBUSY;
3291 dev_hold(master);
3292 }
3293
3294 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003295
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 synchronize_net();
3297
3298 if (old)
3299 dev_put(old);
3300
3301 if (master)
3302 slave->flags |= IFF_SLAVE;
3303 else
3304 slave->flags &= ~IFF_SLAVE;
3305
3306 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3307 return 0;
3308}
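/*
 * Illustrative enslavement call site (hypothetical, modeled on how a
 * bonding-style driver would use this); RTNL must already be held, and
 * passing NULL later breaks the pairing again:
 *
 *	err = netdev_set_master(slave_dev, bond_dev);
 *	if (err)
 *		goto unwind;
 *	...
 *	netdev_set_master(slave_dev, NULL);
 */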
3309
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003310static void dev_change_rx_flags(struct net_device *dev, int flags)
3311{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003312 const struct net_device_ops *ops = dev->netdev_ops;
3313
3314 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3315 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003316}
3317
Wang Chendad9b332008-06-18 01:48:28 -07003318static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003319{
3320 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003321 uid_t uid;
3322 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003323
Patrick McHardy24023452007-07-14 18:51:31 -07003324 ASSERT_RTNL();
3325
Wang Chendad9b332008-06-18 01:48:28 -07003326 dev->flags |= IFF_PROMISC;
3327 dev->promiscuity += inc;
3328 if (dev->promiscuity == 0) {
3329 /*
3330 * Avoid overflow.
3331 * If inc causes overflow, untouch promisc and return error.
3332 */
3333 if (inc < 0)
3334 dev->flags &= ~IFF_PROMISC;
3335 else {
3336 dev->promiscuity -= inc;
3337			printk(KERN_WARNING "%s: promiscuity counter hit its ceiling; "
3338			       "setting promiscuity failed, the promiscuity feature "
3339			       "of the device may be broken.\n", dev->name);
3340 return -EOVERFLOW;
3341 }
3342 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003343 if (dev->flags != old_flags) {
3344 printk(KERN_INFO "device %s %s promiscuous mode\n",
3345 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3346 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003347 if (audit_enabled) {
3348 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003349 audit_log(current->audit_context, GFP_ATOMIC,
3350 AUDIT_ANOM_PROMISCUOUS,
3351 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3352 dev->name, (dev->flags & IFF_PROMISC),
3353 (old_flags & IFF_PROMISC),
3354 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003355 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003356 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003357 }
Patrick McHardy24023452007-07-14 18:51:31 -07003358
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003359 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003360 }
Wang Chendad9b332008-06-18 01:48:28 -07003361 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003362}
3363
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364/**
3365 * dev_set_promiscuity - update promiscuity count on a device
3366 * @dev: device
3367 * @inc: modifier
3368 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003369 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 * remains above zero the interface remains promiscuous. Once it hits zero
3371 * the device reverts back to normal filtering operation. A negative inc
3372 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003373 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 */
Wang Chendad9b332008-06-18 01:48:28 -07003375int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376{
3377 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003378 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379
Wang Chendad9b332008-06-18 01:48:28 -07003380 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003381 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003382 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003383 if (dev->flags != old_flags)
3384 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003385 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386}
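/*
 * Typical usage (illustrative): a capture-style subsystem takes one
 * promiscuity reference and drops it when done.  The counts nest, so
 * every +1 must be paired with a later -1, and RTNL must be held:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */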
3387
3388/**
3389 * dev_set_allmulti - update allmulti count on a device
3390 * @dev: device
3391 * @inc: modifier
3392 *
3393 * Add or remove reception of all multicast frames to a device. While the
3394 * count in the device remains above zero the interface remains listening
3395 * to all interfaces. Once it hits zero the device reverts back to normal
3396 * filtering operation. A negative @inc value is used to drop the counter
3397 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003398 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399 */
3400
Wang Chendad9b332008-06-18 01:48:28 -07003401int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402{
3403 unsigned short old_flags = dev->flags;
3404
Patrick McHardy24023452007-07-14 18:51:31 -07003405 ASSERT_RTNL();
3406
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003408 dev->allmulti += inc;
3409 if (dev->allmulti == 0) {
3410 /*
3411 * Avoid overflow.
3412 * If inc causes overflow, untouch allmulti and return error.
3413 */
3414 if (inc < 0)
3415 dev->flags &= ~IFF_ALLMULTI;
3416 else {
3417 dev->allmulti -= inc;
3418			printk(KERN_WARNING "%s: allmulti counter hit its ceiling; "
3419			       "setting allmulti failed, the allmulti feature of "
3420			       "the device may be broken.\n", dev->name);
3421 return -EOVERFLOW;
3422 }
3423 }
Patrick McHardy24023452007-07-14 18:51:31 -07003424 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003425 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003426 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003427 }
Wang Chendad9b332008-06-18 01:48:28 -07003428 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003429}
3430
3431/*
3432 * Upload unicast and multicast address lists to device and
3433 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003434 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003435 * are present.
3436 */
3437void __dev_set_rx_mode(struct net_device *dev)
3438{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003439 const struct net_device_ops *ops = dev->netdev_ops;
3440
Patrick McHardy4417da62007-06-27 01:28:10 -07003441 /* dev_open will call this function so the list will stay sane. */
3442 if (!(dev->flags&IFF_UP))
3443 return;
3444
3445 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003446 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003447
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003448 if (ops->ndo_set_rx_mode)
3449 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003450 else {
3451 /* Unicast addresses changes may only happen under the rtnl,
3452 * therefore calling __dev_set_promiscuity here is safe.
3453 */
3454 if (dev->uc_count > 0 && !dev->uc_promisc) {
3455 __dev_set_promiscuity(dev, 1);
3456 dev->uc_promisc = 1;
3457 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3458 __dev_set_promiscuity(dev, -1);
3459 dev->uc_promisc = 0;
3460 }
3461
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003462 if (ops->ndo_set_multicast_list)
3463 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003464 }
3465}
3466
3467void dev_set_rx_mode(struct net_device *dev)
3468{
David S. Millerb9e40852008-07-15 00:15:08 -07003469 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003470 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003471 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472}
3473
Jiri Pirkof001fde2009-05-05 02:48:28 +00003474/* hw addresses list handling functions */
3475
Jiri Pirkoccffad252009-05-22 23:22:17 +00003476static int __hw_addr_add(struct list_head *list, int *delta,
3477 unsigned char *addr, int addr_len,
3478 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003479{
3480 struct netdev_hw_addr *ha;
3481 int alloc_size;
3482
3483 if (addr_len > MAX_ADDR_LEN)
3484 return -EINVAL;
3485
Jiri Pirkoccffad252009-05-22 23:22:17 +00003486 list_for_each_entry(ha, list, list) {
3487 if (!memcmp(ha->addr, addr, addr_len) &&
3488 ha->type == addr_type) {
3489 ha->refcount++;
3490 return 0;
3491 }
3492 }
3493
3494
Jiri Pirkof001fde2009-05-05 02:48:28 +00003495 alloc_size = sizeof(*ha);
3496 if (alloc_size < L1_CACHE_BYTES)
3497 alloc_size = L1_CACHE_BYTES;
3498 ha = kmalloc(alloc_size, GFP_ATOMIC);
3499 if (!ha)
3500 return -ENOMEM;
3501 memcpy(ha->addr, addr, addr_len);
3502 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003503 ha->refcount = 1;
3504 ha->synced = false;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003505 list_add_tail_rcu(&ha->list, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003506 if (delta)
3507 (*delta)++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003508 return 0;
3509}
3510
3511static void ha_rcu_free(struct rcu_head *head)
3512{
3513 struct netdev_hw_addr *ha;
3514
3515 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3516 kfree(ha);
3517}
3518
Jiri Pirkoccffad252009-05-22 23:22:17 +00003519static int __hw_addr_del(struct list_head *list, int *delta,
3520 unsigned char *addr, int addr_len,
3521 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003522{
3523 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003524
3525 list_for_each_entry(ha, list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003526 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003527 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003528 if (--ha->refcount)
3529 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003530 list_del_rcu(&ha->list);
3531 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003532 if (delta)
3533 (*delta)--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003534 return 0;
3535 }
3536 }
3537 return -ENOENT;
3538}
3539
Jiri Pirkoccffad252009-05-22 23:22:17 +00003540static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta,
3541 struct list_head *from_list, int addr_len,
3542 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003543{
3544 int err;
3545 struct netdev_hw_addr *ha, *ha2;
3546 unsigned char type;
3547
3548 list_for_each_entry(ha, from_list, list) {
3549 type = addr_type ? addr_type : ha->type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003550 err = __hw_addr_add(to_list, to_delta, ha->addr,
3551 addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003552 if (err)
3553 goto unroll;
3554 }
3555 return 0;
3556
3557unroll:
3558 list_for_each_entry(ha2, from_list, list) {
3559 if (ha2 == ha)
3560 break;
3561 type = addr_type ? addr_type : ha2->type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003562 __hw_addr_del(to_list, to_delta, ha2->addr,
3563 addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003564 }
3565 return err;
3566}
3567
Jiri Pirkoccffad252009-05-22 23:22:17 +00003568static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta,
3569 struct list_head *from_list, int addr_len,
3570 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003571{
3572 struct netdev_hw_addr *ha;
3573 unsigned char type;
3574
3575 list_for_each_entry(ha, from_list, list) {
3576 type = addr_type ? addr_type : ha->type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003577 __hw_addr_del(to_list, to_delta, ha->addr,
3578			      addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003579 }
3580}
3581
Jiri Pirkoccffad252009-05-22 23:22:17 +00003582static int __hw_addr_sync(struct list_head *to_list, int *to_delta,
3583 struct list_head *from_list, int *from_delta,
3584 int addr_len)
3585{
3586 int err = 0;
3587 struct netdev_hw_addr *ha, *tmp;
3588
3589 list_for_each_entry_safe(ha, tmp, from_list, list) {
3590 if (!ha->synced) {
3591 err = __hw_addr_add(to_list, to_delta, ha->addr,
3592 addr_len, ha->type);
3593 if (err)
3594 break;
3595 ha->synced = true;
3596 ha->refcount++;
3597 } else if (ha->refcount == 1) {
3598 __hw_addr_del(to_list, to_delta, ha->addr,
3599 addr_len, ha->type);
3600 __hw_addr_del(from_list, from_delta, ha->addr,
3601 addr_len, ha->type);
3602 }
3603 }
3604 return err;
3605}
3606
3607static void __hw_addr_unsync(struct list_head *to_list, int *to_delta,
3608 struct list_head *from_list, int *from_delta,
3609 int addr_len)
3610{
3611 struct netdev_hw_addr *ha, *tmp;
3612
3613 list_for_each_entry_safe(ha, tmp, from_list, list) {
3614 if (ha->synced) {
3615 __hw_addr_del(to_list, to_delta, ha->addr,
3616 addr_len, ha->type);
3617 ha->synced = false;
3618 __hw_addr_del(from_list, from_delta, ha->addr,
3619 addr_len, ha->type);
3620 }
3621 }
3622}
3623
3624
Jiri Pirkof001fde2009-05-05 02:48:28 +00003625static void __hw_addr_flush(struct list_head *list)
3626{
3627 struct netdev_hw_addr *ha, *tmp;
3628
3629 list_for_each_entry_safe(ha, tmp, list, list) {
3630 list_del_rcu(&ha->list);
3631 call_rcu(&ha->rcu_head, ha_rcu_free);
3632 }
3633}
3634
3635/* Device addresses handling functions */
3636
3637static void dev_addr_flush(struct net_device *dev)
3638{
3639 /* rtnl_mutex must be held here */
3640
3641 __hw_addr_flush(&dev->dev_addr_list);
3642 dev->dev_addr = NULL;
3643}
3644
3645static int dev_addr_init(struct net_device *dev)
3646{
3647 unsigned char addr[MAX_ADDR_LEN];
3648 struct netdev_hw_addr *ha;
3649 int err;
3650
3651 /* rtnl_mutex must be held here */
3652
3653 INIT_LIST_HEAD(&dev->dev_addr_list);
3654	memset(addr, 0, sizeof(addr));
Jiri Pirkoccffad252009-05-22 23:22:17 +00003655	err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003656 NETDEV_HW_ADDR_T_LAN);
3657 if (!err) {
3658 /*
3659 * Get the first (previously created) address from the list
3660 * and set dev_addr pointer to this location.
3661 */
3662 ha = list_first_entry(&dev->dev_addr_list,
3663 struct netdev_hw_addr, list);
3664 dev->dev_addr = ha->addr;
3665 }
3666 return err;
3667}
3668
3669/**
3670 * dev_addr_add - Add a device address
3671 * @dev: device
3672 * @addr: address to add
3673 * @addr_type: address type
3674 *
3675 * Add a device address to the device or increase the reference count if
3676 * it already exists.
3677 *
3678 * The caller must hold the rtnl_mutex.
3679 */
3680int dev_addr_add(struct net_device *dev, unsigned char *addr,
3681 unsigned char addr_type)
3682{
3683 int err;
3684
3685 ASSERT_RTNL();
3686
Jiri Pirkoccffad252009-05-22 23:22:17 +00003687 err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003688 addr_type);
3689 if (!err)
3690 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3691 return err;
3692}
3693EXPORT_SYMBOL(dev_addr_add);
3694
3695/**
3696 * dev_addr_del - Release a device address.
3697 * @dev: device
3698 * @addr: address to delete
3699 * @addr_type: address type
3700 *
3701 * Release reference to a device address and remove it from the device
3702 * if the reference count drops to zero.
3703 *
3704 * The caller must hold the rtnl_mutex.
3705 */
3706int dev_addr_del(struct net_device *dev, unsigned char *addr,
3707 unsigned char addr_type)
3708{
3709 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003710 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003711
3712 ASSERT_RTNL();
3713
Jiri Pirkoccffad252009-05-22 23:22:17 +00003714 /*
3715	 * We cannot remove the first address from the list because
3716	 * dev->dev_addr points to it.
3717 */
3718 ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list);
3719 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3720 return -ENOENT;
3721
3722 err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len,
3723 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003724 if (!err)
3725 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3726 return err;
3727}
3728EXPORT_SYMBOL(dev_addr_del);
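/*
 * Illustrative paired usage (hypothetical driver code): device
 * addresses are reference counted, so add and delete calls must
 * balance.  Both require RTNL.
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, secondary_mac, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_addr_del(dev, secondary_mac, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 */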
3729
3730/**
3731 * dev_addr_add_multiple - Add device addresses from another device
3732 * @to_dev: device to which addresses will be added
3733 * @from_dev: device from which addresses will be added
3734 * @addr_type: address type - 0 means type will be used from from_dev
3735 *
3736 * Add the device addresses of one device to another.
3737 *
3738 * The caller must hold the rtnl_mutex.
3739 */
3740int dev_addr_add_multiple(struct net_device *to_dev,
3741 struct net_device *from_dev,
3742 unsigned char addr_type)
3743{
3744 int err;
3745
3746 ASSERT_RTNL();
3747
3748 if (from_dev->addr_len != to_dev->addr_len)
3749 return -EINVAL;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003750 err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL,
3751 &from_dev->dev_addr_list,
3752 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003753 if (!err)
3754 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3755 return err;
3756}
3757EXPORT_SYMBOL(dev_addr_add_multiple);
3758
3759/**
3760 * dev_addr_del_multiple - Delete device addresses by another device
3761 * @to_dev: device where the addresses will be deleted
3762 * @from_dev: device whose addresses will be deleted from @to_dev
3763 * @addr_type: address type - 0 means the type will be taken from from_dev
3764 *
3765 * Deletes the addresses listed in from_dev from to_dev.
3766 *
3767 * The caller must hold the rtnl_mutex.
3768 */
3769int dev_addr_del_multiple(struct net_device *to_dev,
3770 struct net_device *from_dev,
3771 unsigned char addr_type)
3772{
3773 ASSERT_RTNL();
3774
3775 if (from_dev->addr_len != to_dev->addr_len)
3776 return -EINVAL;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003777 __hw_addr_del_multiple(&to_dev->dev_addr_list, NULL,
3778 &from_dev->dev_addr_list,
3779 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003780 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3781 return 0;
3782}
3783EXPORT_SYMBOL(dev_addr_del_multiple);
3784
3785/* unicast and multicast addresses handling functions */
3786
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003787int __dev_addr_delete(struct dev_addr_list **list, int *count,
3788 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003789{
3790 struct dev_addr_list *da;
3791
3792 for (; (da = *list) != NULL; list = &da->next) {
3793 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3794 alen == da->da_addrlen) {
3795 if (glbl) {
3796 int old_glbl = da->da_gusers;
3797 da->da_gusers = 0;
3798 if (old_glbl == 0)
3799 break;
3800 }
3801 if (--da->da_users)
3802 return 0;
3803
3804 *list = da->next;
3805 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003806 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003807 return 0;
3808 }
3809 }
3810 return -ENOENT;
3811}
3812
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003813int __dev_addr_add(struct dev_addr_list **list, int *count,
3814 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003815{
3816 struct dev_addr_list *da;
3817
3818 for (da = *list; da != NULL; da = da->next) {
3819 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3820 da->da_addrlen == alen) {
3821 if (glbl) {
3822 int old_glbl = da->da_gusers;
3823 da->da_gusers = 1;
3824 if (old_glbl)
3825 return 0;
3826 }
3827 da->da_users++;
3828 return 0;
3829 }
3830 }
3831
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003832 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003833 if (da == NULL)
3834 return -ENOMEM;
3835 memcpy(da->da_addr, addr, alen);
3836 da->da_addrlen = alen;
3837 da->da_users = 1;
3838 da->da_gusers = glbl ? 1 : 0;
3839 da->next = *list;
3840 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003841 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003842 return 0;
3843}
3844
Patrick McHardy4417da62007-06-27 01:28:10 -07003845/**
3846 * dev_unicast_delete - Release secondary unicast address.
3847 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003848 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07003849 *
3850 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003851 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003852 *
3853 * The caller must hold the rtnl_mutex.
3854 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003855int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003856{
3857 int err;
3858
3859 ASSERT_RTNL();
3860
Jiri Pirkoccffad252009-05-22 23:22:17 +00003861 err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr,
3862 dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003863 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003864 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003865 return err;
3866}
3867EXPORT_SYMBOL(dev_unicast_delete);
3868
3869/**
3870 * dev_unicast_add - add a secondary unicast address
3871 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003872 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07003873 *
3874 * Add a secondary unicast address to the device or increase
3875 * the reference count if it already exists.
3876 *
3877 * The caller must hold the rtnl_mutex.
3878 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003879int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003880{
3881 int err;
3882
3883 ASSERT_RTNL();
3884
Jiri Pirkoccffad252009-05-22 23:22:17 +00003885 err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr,
3886 dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003887 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003888 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003889 return err;
3890}
3891EXPORT_SYMBOL(dev_unicast_add);
3892
Chris Leeche83a2ea2008-01-31 16:53:23 -08003893int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3894 struct dev_addr_list **from, int *from_count)
3895{
3896 struct dev_addr_list *da, *next;
3897 int err = 0;
3898
3899 da = *from;
3900 while (da != NULL) {
3901 next = da->next;
3902 if (!da->da_synced) {
3903 err = __dev_addr_add(to, to_count,
3904 da->da_addr, da->da_addrlen, 0);
3905 if (err < 0)
3906 break;
3907 da->da_synced = 1;
3908 da->da_users++;
3909 } else if (da->da_users == 1) {
3910 __dev_addr_delete(to, to_count,
3911 da->da_addr, da->da_addrlen, 0);
3912 __dev_addr_delete(from, from_count,
3913 da->da_addr, da->da_addrlen, 0);
3914 }
3915 da = next;
3916 }
3917 return err;
3918}
3919
3920void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3921 struct dev_addr_list **from, int *from_count)
3922{
3923 struct dev_addr_list *da, *next;
3924
3925 da = *from;
3926 while (da != NULL) {
3927 next = da->next;
3928 if (da->da_synced) {
3929 __dev_addr_delete(to, to_count,
3930 da->da_addr, da->da_addrlen, 0);
3931 da->da_synced = 0;
3932 __dev_addr_delete(from, from_count,
3933 da->da_addr, da->da_addrlen, 0);
3934 }
3935 da = next;
3936 }
3937}
3938
3939/**
3940 * dev_unicast_sync - Synchronize device's unicast list to another device
3941 * @to: destination device
3942 * @from: source device
3943 *
3944 * Add newly added addresses to the destination device and release
Jiri Pirkoccffad252009-05-22 23:22:17 +00003945 * addresses that have no users left.
Chris Leeche83a2ea2008-01-31 16:53:23 -08003946 *
3947 * This function is intended to be called from the dev->set_rx_mode
3948 * function of layered software devices.
3949 */
3950int dev_unicast_sync(struct net_device *to, struct net_device *from)
3951{
3952 int err = 0;
3953
Jiri Pirkoccffad252009-05-22 23:22:17 +00003954 ASSERT_RTNL();
3955
3956 if (to->addr_len != from->addr_len)
3957 return -EINVAL;
3958
3959 err = __hw_addr_sync(&to->uc_list, &to->uc_count,
3960 &from->uc_list, &from->uc_count, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003961 if (!err)
3962 __dev_set_rx_mode(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003963 return err;
3964}
3965EXPORT_SYMBOL(dev_unicast_sync);
3966
3967/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003968 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003969 * @to: destination device
3970 * @from: source device
3971 *
3972 * Remove all addresses that were added to the destination device by
3973 * dev_unicast_sync(). This function is intended to be called from the
3974 * dev->stop function of layered software devices.
3975 */
3976void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3977{
Jiri Pirkoccffad252009-05-22 23:22:17 +00003978 ASSERT_RTNL();
Chris Leeche83a2ea2008-01-31 16:53:23 -08003979
Jiri Pirkoccffad252009-05-22 23:22:17 +00003980 if (to->addr_len != from->addr_len)
3981 return;
3982
3983 __hw_addr_unsync(&to->uc_list, &to->uc_count,
3984 &from->uc_list, &from->uc_count, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003985 __dev_set_rx_mode(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003986}
3987EXPORT_SYMBOL(dev_unicast_unsync);
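/*
 * Sketch of the intended caller (a hypothetical layered device such as
 * a VLAN-style driver): its rx-mode handler mirrors its own unicast
 * list onto the underlying real device, and its stop path undoes it.
 * "my_priv" is an assumed accessor for driver private data.
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		dev_unicast_sync(my_priv(dev)->real_dev, dev);
 *	}
 *
 *	static int my_stop(struct net_device *dev)
 *	{
 *		dev_unicast_unsync(my_priv(dev)->real_dev, dev);
 *		return 0;
 *	}
 */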
3988
Jiri Pirkoccffad252009-05-22 23:22:17 +00003989static void dev_unicast_flush(struct net_device *dev)
3990{
3991 /* rtnl_mutex must be held here */
3992
3993 __hw_addr_flush(&dev->uc_list);
3994 dev->uc_count = 0;
3995}
3996
3997static void dev_unicast_init(struct net_device *dev)
3998{
3999 /* rtnl_mutex must be held here */
4000
4001 INIT_LIST_HEAD(&dev->uc_list);
4002}
4003
4004
Denis Cheng12972622007-07-18 02:12:56 -07004005static void __dev_addr_discard(struct dev_addr_list **list)
4006{
4007 struct dev_addr_list *tmp;
4008
4009 while (*list != NULL) {
4010 tmp = *list;
4011 *list = tmp->next;
4012 if (tmp->da_users > tmp->da_gusers)
4013			printk(KERN_ERR "__dev_addr_discard: address leakage! "
4014 "da_users=%d\n", tmp->da_users);
4015 kfree(tmp);
4016 }
4017}
4018
Denis Cheng26cc2522007-07-18 02:12:03 -07004019static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004020{
David S. Millerb9e40852008-07-15 00:15:08 -07004021 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004022
Denis Cheng456ad752007-07-18 02:10:54 -07004023 __dev_addr_discard(&dev->mc_list);
4024 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004025
David S. Millerb9e40852008-07-15 00:15:08 -07004026 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004027}
4028
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004029/**
4030 * dev_get_flags - get flags reported to userspace
4031 * @dev: device
4032 *
4033 * Get the combination of flag bits exported through APIs to userspace.
4034 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035unsigned dev_get_flags(const struct net_device *dev)
4036{
4037 unsigned flags;
4038
4039 flags = (dev->flags & ~(IFF_PROMISC |
4040 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004041 IFF_RUNNING |
4042 IFF_LOWER_UP |
4043 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044 (dev->gflags & (IFF_PROMISC |
4045 IFF_ALLMULTI));
4046
Stefan Rompfb00055a2006-03-20 17:09:11 -08004047 if (netif_running(dev)) {
4048 if (netif_oper_up(dev))
4049 flags |= IFF_RUNNING;
4050 if (netif_carrier_ok(dev))
4051 flags |= IFF_LOWER_UP;
4052 if (netif_dormant(dev))
4053 flags |= IFF_DORMANT;
4054 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055
4056 return flags;
4057}
4058
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004059/**
4060 * dev_change_flags - change device settings
4061 * @dev: device
4062 * @flags: device state flags
4063 *
4064 * Change settings on device based state flags. The flags are
4065 * in the userspace exported format.
4066 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067int dev_change_flags(struct net_device *dev, unsigned flags)
4068{
Thomas Graf7c355f52007-06-05 16:03:03 -07004069 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070 int old_flags = dev->flags;
4071
Patrick McHardy24023452007-07-14 18:51:31 -07004072 ASSERT_RTNL();
4073
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074 /*
4075 * Set the flags on our device.
4076 */
4077
4078 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4079 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4080 IFF_AUTOMEDIA)) |
4081 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4082 IFF_ALLMULTI));
4083
4084 /*
4085 * Load in the correct multicast list now the flags have changed.
4086 */
4087
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004088 if ((old_flags ^ flags) & IFF_MULTICAST)
4089 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004090
Patrick McHardy4417da62007-06-27 01:28:10 -07004091 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004092
4093 /*
4094	 *	Have we downed the interface? We handle IFF_UP ourselves
4095 * according to user attempts to set it, rather than blindly
4096 * setting it.
4097 */
4098
4099 ret = 0;
4100 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4101 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4102
4103 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004104 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105 }
4106
4107 if (dev->flags & IFF_UP &&
4108 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4109 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004110 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111
4112 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4113 int inc = (flags & IFF_PROMISC) ? +1 : -1;
4114 dev->gflags ^= IFF_PROMISC;
4115 dev_set_promiscuity(dev, inc);
4116 }
4117
4118 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4119	   is important. Some (broken) drivers set IFF_PROMISC when
4120	   IFF_ALLMULTI is requested, without asking us and without reporting it.
4121 */
4122 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4123 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
4124 dev->gflags ^= IFF_ALLMULTI;
4125 dev_set_allmulti(dev, inc);
4126 }
4127
Thomas Graf7c355f52007-06-05 16:03:03 -07004128 /* Exclude state transition flags, already notified */
4129 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4130 if (changes)
4131 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132
4133 return ret;
4134}
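/*
 * Userspace view (illustrative): bringing an interface up through the
 * flags ioctls that end up in this function.
 *
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCGIFFLAGS, &ifr);
 *	ifr.ifr_flags |= IFF_UP;
 *	ioctl(fd, SIOCSIFFLAGS, &ifr);
 */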
4135
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004136/**
4137 * dev_set_mtu - Change maximum transfer unit
4138 * @dev: device
4139 * @new_mtu: new transfer unit
4140 *
4141 * Change the maximum transfer size of the network device.
4142 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004143int dev_set_mtu(struct net_device *dev, int new_mtu)
4144{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004145 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146 int err;
4147
4148 if (new_mtu == dev->mtu)
4149 return 0;
4150
4151	/* MTU must not be negative. */
4152 if (new_mtu < 0)
4153 return -EINVAL;
4154
4155 if (!netif_device_present(dev))
4156 return -ENODEV;
4157
4158 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004159 if (ops->ndo_change_mtu)
4160 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004161 else
4162 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004163
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004165 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166 return err;
4167}
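/*
 * Userspace view (illustrative): SIOCSIFMTU is the usual path into
 * dev_set_mtu().
 *
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_mtu = 9000;
 *	ioctl(fd, SIOCSIFMTU, &ifr);
 */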
4168
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004169/**
4170 * dev_set_mac_address - Change Media Access Control Address
4171 * @dev: device
4172 * @sa: new address
4173 *
4174 * Change the hardware (MAC) address of the device
4175 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4177{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004178 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179 int err;
4180
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004181 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182 return -EOPNOTSUPP;
4183 if (sa->sa_family != dev->type)
4184 return -EINVAL;
4185 if (!netif_device_present(dev))
4186 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004187 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004189 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004190 return err;
4191}
4192
4193/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07004194 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004196static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197{
4198 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004199 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200
4201 if (!dev)
4202 return -ENODEV;
4203
4204 switch (cmd) {
4205 case SIOCGIFFLAGS: /* Get interface flags */
4206 ifr->ifr_flags = dev_get_flags(dev);
4207 return 0;
4208
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209 case SIOCGIFMETRIC: /* Get the metric on the interface
4210 (currently unused) */
4211 ifr->ifr_metric = 0;
4212 return 0;
4213
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214 case SIOCGIFMTU: /* Get the MTU of a device */
4215 ifr->ifr_mtu = dev->mtu;
4216 return 0;
4217
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218 case SIOCGIFHWADDR:
4219 if (!dev->addr_len)
4220 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4221 else
4222 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4223 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4224 ifr->ifr_hwaddr.sa_family = dev->type;
4225 return 0;
4226
Jeff Garzik14e3e072007-10-08 00:06:32 -07004227 case SIOCGIFSLAVE:
4228 err = -EINVAL;
4229 break;
4230
4231 case SIOCGIFMAP:
4232 ifr->ifr_map.mem_start = dev->mem_start;
4233 ifr->ifr_map.mem_end = dev->mem_end;
4234 ifr->ifr_map.base_addr = dev->base_addr;
4235 ifr->ifr_map.irq = dev->irq;
4236 ifr->ifr_map.dma = dev->dma;
4237 ifr->ifr_map.port = dev->if_port;
4238 return 0;
4239
4240 case SIOCGIFINDEX:
4241 ifr->ifr_ifindex = dev->ifindex;
4242 return 0;
4243
4244 case SIOCGIFTXQLEN:
4245 ifr->ifr_qlen = dev->tx_queue_len;
4246 return 0;
4247
4248 default:
4249 /* dev_ioctl() should ensure this case
4250 * is never reached
4251 */
4252 WARN_ON(1);
4253 err = -EINVAL;
4254 break;
4255
4256 }
4257 return err;
4258}

/*
 *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	const struct net_device_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = dev->netdev_ops;

	switch (cmd) {
	case SIOCSIFFLAGS:	/* Set interface flags */
		return dev_change_flags(dev, ifr->ifr_flags);

	case SIOCSIFMETRIC:	/* Set the metric on the interface
				   (currently unused) */
		return -EOPNOTSUPP;

	case SIOCSIFMTU:	/* Set the MTU of a device */
		return dev_set_mtu(dev, ifr->ifr_mtu);

	case SIOCSIFHWADDR:
		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

	case SIOCSIFHWBROADCAST:
		if (ifr->ifr_hwaddr.sa_family != dev->type)
			return -EINVAL;
		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
		return 0;

	case SIOCSIFMAP:
		if (ops->ndo_set_config) {
			if (!netif_device_present(dev))
				return -ENODEV;
			return ops->ndo_set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;

	case SIOCADDMULTI:
		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
				  dev->addr_len, 1);

	case SIOCDELMULTI:
		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
				     dev->addr_len, 1);

	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;

	case SIOCSIFNAME:
		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
		return dev_change_name(dev, ifr->ifr_newname);

	/*
	 *	Unknown or private ioctl
	 */
	default:
		if ((cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCSHWTSTAMP ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (ops->ndo_do_ioctl) {
				if (netif_device_present(dev))
					err = ops->ndo_do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;
	}
	return err;
}
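
/*
 * Illustrative sketch (hypothetical driver code): the default branch
 * above hands MII and private ioctls to the driver's ndo_do_ioctl
 * method.  A minimal handler for a driver that keeps a mii_if_info in
 * its (assumed) private struct could delegate to the generic MII
 * helper:
 *
 *	static int foo_do_ioctl(struct net_device *dev,
 *				struct ifreq *ifr, int cmd)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
 *	}
 */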

/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
 *	@net: the applicable net namespace
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */

int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps
	   writing to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf(net, (char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname(net, (struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
	/*
	 *	These ioctl calls:
	 *	- can be done by all.
	 *	- atomic and do not require locking.
	 *	- return a value
	 */
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
		read_lock(&dev_base_lock);
		ret = dev_ifsioc_locked(net, &ifr, cmd);
		read_unlock(&dev_base_lock);
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCETHTOOL:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(net, &ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- return a value
	 */
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- do not return a value
	 */
	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFMAP:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSIFTXQLEN:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSHWTSTAMP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
		/* Get the per device memory space. We can add this but
		 * currently do not support it */
	case SIOCSIFMEM:
		/* Set the per device memory buffer space.
		 * Not applicable in our case */
	case SIOCSIFLINK:
		return -EINVAL;

	/*
	 *	Unknown or private ioctl.
	 */
	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15)) {
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(net, &ifr, cmd);
			rtnl_unlock();
			if (!ret && copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
				ret = -EFAULT;
			return ret;
		}
		/* Take care of Wireless Extensions */
		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
			return wext_handle_ioctl(net, &ifr, cmd, arg);
		return -EINVAL;
	}
}
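
/*
 * Illustrative sketch (user space, not part of this file): the
 * SIOCGIFCONF special case at the top of dev_ioctl() backs the
 * classic interface-enumeration idiom:
 *
 *	struct ifreq reqs[16];
 *	struct ifconf ifc = { .ifc_len = sizeof(reqs), .ifc_req = reqs };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	int i;
 *
 *	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0)
 *		for (i = 0; i < ifc.ifc_len / sizeof(struct ifreq); i++)
 *			printf("%s\n", reqs[i].ifr_name);
 */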

/**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}

static void rollback_registered(struct net_device *dev)
{
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	/* Some devices call this without ever having registered,
	 * in order to unwind a failed initialization. */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
				  "was registered\n", dev->name, dev);

		WARN_ON(1);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_REGISTERED);

	/* If device is running, close it first. */
	dev_close(dev);

	/* And unlink it from device chain. */
	unlist_netdevice(dev);

	dev->reg_state = NETREG_UNREGISTERING;

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_unicast_flush(dev);
	dev_addr_discard(dev);

	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);

	/* Notifier chain MUST detach us from master device. */
	WARN_ON(dev->master);

	/* Remove entries from kobject tree */
	netdev_unregister_kobject(dev);

	synchronize_net();

	dev_put(dev);
}
4631
David S. Millere8a04642008-07-17 00:34:19 -07004632static void __netdev_init_queue_locks_one(struct net_device *dev,
4633 struct netdev_queue *dev_queue,
4634 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004635{
4636 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004637 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004638 dev_queue->xmit_lock_owner = -1;
4639}
4640
4641static void netdev_init_queue_locks(struct net_device *dev)
4642{
David S. Millere8a04642008-07-17 00:34:19 -07004643 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4644 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004645}

unsigned long netdev_fix_features(unsigned long features, const char *name)
{
	/* Fix illegal SG+CSUM combinations. */
	if ((features & NETIF_F_SG) &&
	    !(features & NETIF_F_ALL_CSUM)) {
		if (name)
			printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
			       "checksum feature.\n", name);
		features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
		if (name)
			printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
			       "SG feature.\n", name);
		features &= ~NETIF_F_TSO;
	}

	if (features & NETIF_F_UFO) {
		if (!(features & NETIF_F_GEN_CSUM)) {
			if (name)
				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
				       "since no NETIF_F_HW_CSUM feature.\n",
				       name);
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			if (name)
				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
				       "since no NETIF_F_SG feature.\n", name);
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}
EXPORT_SYMBOL(netdev_fix_features);
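
/*
 * Illustrative sketch (hypothetical driver code): a driver letting
 * user space toggle features can run the requested set through
 * netdev_fix_features() so dependent flags stay consistent:
 *
 *	dev->features = netdev_fix_features(dev->features | NETIF_F_TSO,
 *					    dev->name);
 *
 * If NETIF_F_SG is absent, the TSO bit just requested is dropped
 * again and a notice is logged, rather than leaving an illegal
 * combination in place.
 */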

/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *p;
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);
	netdev_init_queue_locks(dev);

	dev->iflink = -1;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (!dev_valid_name(dev->name)) {
		ret = -EINVAL;
		goto err_uninit;
	}

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Check for existence of name */
	head = dev_name_hash(net, dev->name);
	hlist_for_each(p, head) {
		struct net_device *d
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
			ret = -EEXIST;
			goto err_uninit;
		}
	}

	/* Fix illegal checksum combinations */
	if ((dev->features & NETIF_F_HW_CSUM) &&
	    (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	if ((dev->features & NETIF_F_NO_CSUM) &&
	    (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
	}

	dev->features = netdev_fix_features(dev->features, dev->name);

	/* Enable software GSO if SG is supported. */
	if (dev->features & NETIF_F_SG)
		dev->features |= NETIF_F_GSO;

	netdev_initialize_kobject(dev);
	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}

/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	set of fields needed so that it can be used to schedule NAPI polls
 *	without registering a full blown interface. This is to be used by
 *	drivers that need to tie several hardware interfaces to a single
 *	NAPI poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* initialize the ref count */
	atomic_set(&dev->refcnt, 1);

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
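
/*
 * Illustrative sketch (hypothetical driver code): a driver whose
 * hardware funnels several interfaces through one interrupt can hang
 * its NAPI context off a dummy netdev instead of any real one:
 *
 *	static struct net_device foo_napi_dev;
 *	static struct napi_struct foo_napi;
 *
 *	init_dummy_netdev(&foo_napi_dev);
 *	netif_napi_add(&foo_napi_dev, &foo_napi, foo_poll, 64);
 *	napi_enable(&foo_napi);
 *
 * where foo_poll() is the usual int (*)(struct napi_struct *, int)
 * poll callback; the dummy device is never registered.
 */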

/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();

	/*
	 * If the name is a format string the caller wants us to
	 * do a name allocation.
	 */
	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto out;
	}

	err = register_netdevice(dev);
out:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
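
/*
 * Illustrative sketch (hypothetical driver code): the usual
 * probe-time pairing of allocation, registration and error
 * unwinding, assuming a driver-private struct foo_priv:
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *
 * register_netdev() expands the "foo%d" format to the first free
 * name, e.g. "foo0".
 */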

/*
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;

	rebroadcast_time = warning_time = jiffies;
	while (atomic_read(&dev->refcnt) != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, atomic_read(&dev->refcnt));
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_entry(list.next, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(atomic_read(&dev->refcnt));
		WARN_ON(dev->ip_ptr);
		WARN_ON(dev->ip6_ptr);
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *
 *	Get network statistics from device. The device driver may provide
 *	its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
 *	the internal statistics structure is used.
 */
const struct net_device_stats *dev_get_stats(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats)
		return ops->ndo_get_stats(dev);
	else {
		unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
		struct net_device_stats *stats = &dev->stats;
		unsigned int i;
		struct netdev_queue *txq;

		for (i = 0; i < dev->num_tx_queues; i++) {
			txq = netdev_get_tx_queue(dev, i);
			tx_bytes += txq->tx_bytes;
			tx_packets += txq->tx_packets;
			tx_dropped += txq->tx_dropped;
		}
		if (tx_bytes || tx_packets || tx_dropped) {
			stats->tx_bytes = tx_bytes;
			stats->tx_packets = tx_packets;
			stats->tx_dropped = tx_dropped;
		}
		return stats;
	}
}
EXPORT_SYMBOL(dev_get_stats);
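
/*
 * Illustrative sketch (hypothetical caller): read the counters
 * through the helper rather than poking dev->stats directly, so a
 * driver-supplied ndo_get_stats is honoured:
 *
 *	const struct net_device_stats *stats = dev_get_stats(dev);
 *
 *	printk(KERN_INFO "%s: %lu rx packets, %lu tx packets\n",
 *	       dev->name, stats->rx_packets, stats->tx_packets);
 */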

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue,
				  void *_unused)
{
	queue->dev = dev;
}

static void netdev_init_queues(struct net_device *dev)
{
	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);
}

/**
 *	alloc_netdev_mq - allocate network device
 *	@sizeof_priv: size of private data to allocate space for
 *	@name: device name format string
 *	@setup: callback to initialize device
 *	@queue_count: the number of subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization.  Also allocates subqueue structs
 *	for each queue on the device at the end of the netdevice.
 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct netdev_queue *tx;
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "tx qdiscs.\n");
		goto free_p;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	if (dev_addr_init(dev))
		goto free_tx;

	dev_unicast_init(dev);

	dev_net_set(dev, &init_net);

	dev->_tx = tx;
	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

	dev->gso_max_size = GSO_MAX_SIZE;

	netdev_init_queues(dev);

	INIT_LIST_HEAD(&dev->napi_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);
	strcpy(dev->name, name);
	return dev;

free_tx:
	kfree(tx);

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mq);
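
/*
 * Illustrative sketch (hypothetical driver code): allocating an
 * Ethernet-style device with four transmit queues; plain
 * alloc_netdev() is this helper with queue_count == 1:
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mq(sizeof(struct foo_priv), "foo%d",
 *			      ether_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */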

/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}

/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}

/**
 *	unregister_netdevice - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */

void unregister_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	rollback_registered(dev);
	/* Finish processing unregister after unlock */
	net_set_todo(dev);
}

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore. In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}

EXPORT_SYMBOL(unregister_netdev);
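
/*
 * Illustrative sketch (hypothetical driver code): teardown mirrors
 * probe, and the unregister must complete before the free:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 *
 * By the time unregister_netdev() returns, netdev_run_todo() has
 * waited for the refcount to drop, so free_netdev() is safe.
 */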

/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	char buf[IFNAMSIZ];
	const char *destname;
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

#ifdef CONFIG_SYSFS
	/* Don't allow real devices to be moved when sysfs
	 * is enabled.
	 */
	err = -EINVAL;
	if (dev->dev.parent)
		goto out;
#endif

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	destname = dev->name;
	if (__dev_get_by_name(net, destname)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (!dev_valid_name(pat))
			goto out;
		if (strchr(pat, '%')) {
			if (__dev_alloc_name(net, pat, buf) < 0)
				goto out;
			destname = buf;
		} else
			destname = pat;
		if (__dev_get_by_name(net, destname))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice() and
	 * unregister_netdevice().
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_unicast_flush(dev);
	dev_addr_discard(dev);

	netdev_unregister_kobject(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* Assign the new device name */
	if (destname != dev->name)
		strcpy(dev->name, destname);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = netdev_register_kobject(dev);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	synchronize_net();
	err = 0;
out:
	return err;
}

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct Qdisc **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}

/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
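
/*
 * Illustrative sketch, modelled on how the bonding driver computes a
 * master's feature set as the intersection of its slaves (the names
 * here are hypothetical):
 *
 *	unsigned long features = NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_TSO;
 *
 *	list_for_each_entry(slave, &priv->slave_list, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     NETIF_F_ONE_FOR_ALL);
 *	master->features = netdev_fix_features(features, master->name);
 */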

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *	@buffer: buffer for resulting name
 *	@len: size of buffer
 *
 *	Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
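
/*
 * Illustrative sketch: the qdisc watchdog uses this helper to name
 * the driver at fault when a transmit queue stalls, roughly:
 *
 *	char drivername[64];
 *
 *	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */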

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
restart:
	for_each_netdev(net, dev) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Delete virtual devices */
		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
			dev->rtnl_link_ops->dellink(dev);
			goto restart;
		}

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
		goto restart;
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
		queue->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too.  Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);

EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

EXPORT_SYMBOL(dev_load);

EXPORT_PER_CPU_SYMBOL(softnet_data);