/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	--BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client	client;
	spinlock_t		lock;
	cpumask_t		channel_mask;
	struct dma_chan		**channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and checking protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	is first on the list, it cannot sense that the packet is cloned
 *	and should be copied-on-write, so it will change the packet and
 *	subsequent readers will see a broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, and therefore cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new packet type (until the next packet is received).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

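/*
 * Example (an illustrative sketch, not part of this file): a module that
 * taps every IPv4 frame could register a handler like this.  The
 * my_ipv4_rcv() name is hypothetical; .type and .func are the fields
 * dev_add_pack() actually links in above.
 *
 *	static int my_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);			(consume the clone we were handed)
 *		return 0;
 *	}
 *
 *	static struct packet_type my_pt = {
 *		.type = __constant_htons(ETH_P_IP),
 *		.func = my_ipv4_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);			(on module init)
 *	dev_remove_pack(&my_pt);		(on module exit)
 */
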
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

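/*
 * Illustrative usage (inferred from the parsing above, not taken from
 * this file): get_options() strips up to four leading integers
 * (irq, base_addr, mem_start, mem_end) and the remainder is taken as
 * the device name, so a command line such as
 *
 *	netdev=5,0x300,0,0,eth0
 *
 * records irq 5 and I/O base 0x300 for eth0, to be picked up later by
 * netdev_boot_setup_check() during probing.
 */
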
/*******************************************************************************

				Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

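/*
 * The hold/put discipline described above, as a sketch (illustrative,
 * not part of this file):
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);		(release the reference from dev_hold())
 *	}
 *
 * __dev_get_by_name() skips the reference counting but is only safe
 * under the RTNL semaphore or dev_base_lock, as its comment notes.
 */
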
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count
 *	increased and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

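/*
 * Results implied directly by the checks above (illustrative):
 *
 *	dev_valid_name("eth0")   -> 1
 *	dev_valid_name("")       -> 0	(empty string)
 *	dev_valid_name("..")     -> 0	(reserved path component)
 *	dev_valid_name("my eth") -> 0	(whitespace)
 *	dev_valid_name("a/b")    -> 0	('/' would break sysfs paths)
 */
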
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

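/*
 * Illustrative call (a sketch, not part of this file): with eth0 and
 * eth1 already registered in the device's namespace,
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *
 * writes "eth2" into dev->name and returns 2.  A name without '%' is
 * taken verbatim when it is still free, as the fall-through above shows.
 */
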
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: length of @alias, must be less than IFALIASZ
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

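/*
 * Both dev_open() and dev_close() run under the RTNL semaphore, as the
 * ASSERT_RTNL() calls enforce.  A caller outside the core would look
 * roughly like this (illustrative sketch):
 *
 *	rtnl_lock();
 *	err = dev_open(dev);		(nop if IFF_UP is already set)
 *	...
 *	err = dev_close(dev);
 *	rtnl_unlock();
 */
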
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to give it a race-free view of the network
 *	device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

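/*
 * Minimal notifier sketch (illustrative, not part of this file).  The
 * my_netdev_event() name is hypothetical; the replay of NETDEV_REGISTER
 * and NETDEV_UP events described above means it also sees devices that
 * were already up when it registered.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */
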
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);

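/*
 * Typical driver-side use (illustrative sketch; my_tx_complete_irq() is
 * a hypothetical handler name):
 *
 *	static irqreturn_t my_tx_complete_irq(int irq, void *data)
 *	{
 *		struct sk_buff *skb = ...;	(skb whose transmit completed)
 *
 *		dev_kfree_skb_any(skb);		(safe in hard-IRQ context)
 *		return IRQ_HANDLED;
 *	}
 */
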
1418
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001419/**
1420 * netif_device_detach - mark device as removed
1421 * @dev: network device
1422 *
1423 * Mark device as removed from system and therefore no longer available.
1424 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001425void netif_device_detach(struct net_device *dev)
1426{
1427 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1428 netif_running(dev)) {
1429 netif_stop_queue(dev);
1430 }
1431}
1432EXPORT_SYMBOL(netif_device_detach);
1433
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001434/**
1435 * netif_device_attach - mark device as attached
1436 * @dev: network device
1437 *
1438 * Mark device as attached to the system and restart the transmit queue if needed.
1439 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001440void netif_device_attach(struct net_device *dev)
1441{
1442 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1443 netif_running(dev)) {
1444 netif_wake_queue(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001445 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001446 }
1447}
1448EXPORT_SYMBOL(netif_device_attach);
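
/*
 * Illustrative sketch (not part of this file): the detach/attach pair
 * is typically used in a driver's suspend/resume callbacks so that the
 * core stops handing packets to a device that is powered down. The
 * foo_* names are hypothetical.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		foo_hw_power_down(dev);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		foo_hw_power_up(dev);
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */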
1449
Ben Hutchings6de329e2008-06-16 17:02:28 -07001450static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1451{
1452 return ((features & NETIF_F_GEN_CSUM) ||
1453 ((features & NETIF_F_IP_CSUM) &&
1454 protocol == htons(ETH_P_IP)) ||
1455 ((features & NETIF_F_IPV6_CSUM) &&
1456 protocol == htons(ETH_P_IPV6)));
1457}
1458
1459static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1460{
1461 if (can_checksum_protocol(dev->features, skb->protocol))
1462 return true;
1463
1464 if (skb->protocol == htons(ETH_P_8021Q)) {
1465 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1466 if (can_checksum_protocol(dev->features & dev->vlan_features,
1467 veh->h_vlan_encapsulated_proto))
1468 return true;
1469 }
1470
1471 return false;
1472}
Denis Vlasenko56079432006-03-29 15:57:29 -08001473
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474/*
1475 * Invalidate hardware checksum when packet is to be mangled, and
1476 * complete checksum manually on outgoing path.
1477 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001478int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479{
Al Virod3bc23e2006-11-14 21:24:49 -08001480 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001481 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482
Patrick McHardy84fa7932006-08-29 16:44:56 -07001483 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001484 goto out_set_summed;
1485
1486 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001487 /* Let GSO fix up the checksum. */
1488 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 }
1490
Herbert Xua0308472007-10-15 01:47:15 -07001491 offset = skb->csum_start - skb_headroom(skb);
1492 BUG_ON(offset >= skb_headlen(skb));
1493 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1494
1495 offset += skb->csum_offset;
1496 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1497
1498 if (skb_cloned(skb) &&
1499 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1501 if (ret)
1502 goto out;
1503 }
1504
Herbert Xua0308472007-10-15 01:47:15 -07001505 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001506out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001508out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 return ret;
1510}
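
/*
 * Worked example of the offset arithmetic above (a sketch, assuming an
 * Ethernet frame carrying IPv4/TCP with no IP options): at this point
 * skb->data points at the MAC header, so the transport header starts
 * at offset 14 + 20 = 34, skb->csum_offset is
 * offsetof(struct tcphdr, check) == 16, and the folded checksum is
 * therefore stored in the two bytes at skb->data + 50.
 */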
1511
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001512/**
1513 * skb_gso_segment - Perform segmentation on skb.
1514 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001515 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001516 *
1517 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001518 *
1519 * It may return NULL if the skb requires no segmentation. This is
1520 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001521 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001522struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001523{
1524 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1525 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001526 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001527 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001528
1529 BUG_ON(skb_shinfo(skb)->frag_list);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001530
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001531 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001532 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001533 __skb_pull(skb, skb->mac_len);
1534
Herbert Xuf9d106a2007-04-23 22:36:13 -07001535 if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001536 if (skb_header_cloned(skb) &&
1537 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1538 return ERR_PTR(err);
1539 }
1540
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001541 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001542 list_for_each_entry_rcu(ptype,
1543 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001544 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001545 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001546 err = ptype->gso_send_check(skb);
1547 segs = ERR_PTR(err);
1548 if (err || skb_gso_ok(skb, features))
1549 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001550 __skb_push(skb, (skb->data -
1551 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001552 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001553 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001554 break;
1555 }
1556 }
1557 rcu_read_unlock();
1558
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001559 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001560
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001561 return segs;
1562}
1563
1564EXPORT_SYMBOL(skb_gso_segment);
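
/*
 * Illustrative sketch (not part of this file): skb_gso_segment() finds
 * the actual segmentation routine through the protocol's packet_type.
 * IPv4, for example, wires its callbacks up roughly like this in
 * af_inet.c (simplified here):
 *
 *	static struct packet_type ip_packet_type = {
 *		.type		= __constant_htons(ETH_P_IP),
 *		.func		= ip_rcv,
 *		.gso_send_check	= inet_gso_send_check,
 *		.gso_segment	= inet_gso_segment,
 *	};
 *
 *	dev_add_pack(&ip_packet_type);
 */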
1565
Herbert Xufb286bb2005-11-10 13:01:24 -08001566/* Take action when hardware reception checksum errors are detected. */
1567#ifdef CONFIG_BUG
1568void netdev_rx_csum_fault(struct net_device *dev)
1569{
1570 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001571 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001572 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001573 dump_stack();
1574 }
1575}
1576EXPORT_SYMBOL(netdev_rx_csum_fault);
1577#endif
1578
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579/* Actually, we should eliminate this check as soon as we know that:
1580 * 1. IOMMU is present and allows mapping of all the memory.
1581 * 2. No high memory really exists on this machine.
1582 */
1583
1584static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1585{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001586#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587 int i;
1588
1589 if (dev->features & NETIF_F_HIGHDMA)
1590 return 0;
1591
1592 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1593 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1594 return 1;
1595
Herbert Xu3d3a8532006-06-27 13:33:10 -07001596#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 return 0;
1598}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001600struct dev_gso_cb {
1601 void (*destructor)(struct sk_buff *skb);
1602};
1603
1604#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1605
1606static void dev_gso_skb_destructor(struct sk_buff *skb)
1607{
1608 struct dev_gso_cb *cb;
1609
1610 do {
1611 struct sk_buff *nskb = skb->next;
1612
1613 skb->next = nskb->next;
1614 nskb->next = NULL;
1615 kfree_skb(nskb);
1616 } while (skb->next);
1617
1618 cb = DEV_GSO_CB(skb);
1619 if (cb->destructor)
1620 cb->destructor(skb);
1621}
1622
1623/**
1624 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1625 * @skb: buffer to segment
1626 *
1627 * This function segments the given skb and stores the list of segments
1628 * in skb->next.
1629 */
1630static int dev_gso_segment(struct sk_buff *skb)
1631{
1632 struct net_device *dev = skb->dev;
1633 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001634 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1635 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001636
Herbert Xu576a30e2006-06-27 13:22:38 -07001637 segs = skb_gso_segment(skb, features);
1638
1639 /* Verifying header integrity only. */
1640 if (!segs)
1641 return 0;
1642
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001643 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001644 return PTR_ERR(segs);
1645
1646 skb->next = segs;
1647 DEV_GSO_CB(skb)->destructor = skb->destructor;
1648 skb->destructor = dev_gso_skb_destructor;
1649
1650 return 0;
1651}
1652
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001653int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1654 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001655{
1656 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001657 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001658 dev_queue_xmit_nit(skb, dev);
1659
Herbert Xu576a30e2006-06-27 13:22:38 -07001660 if (netif_needs_gso(dev, skb)) {
1661 if (unlikely(dev_gso_segment(skb)))
1662 goto out_kfree_skb;
1663 if (skb->next)
1664 goto gso;
1665 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001666
Herbert Xu576a30e2006-06-27 13:22:38 -07001667 return dev->hard_start_xmit(skb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001668 }
1669
Herbert Xu576a30e2006-06-27 13:22:38 -07001670gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001671 do {
1672 struct sk_buff *nskb = skb->next;
1673 int rc;
1674
1675 skb->next = nskb->next;
1676 nskb->next = NULL;
1677 rc = dev->hard_start_xmit(nskb, dev);
1678 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001679 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001680 skb->next = nskb;
1681 return rc;
1682 }
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001683 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001684 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001685 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001686
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001687 skb->destructor = DEV_GSO_CB(skb)->destructor;
1688
1689out_kfree_skb:
1690 kfree_skb(skb);
1691 return 0;
1692}
1693
David S. Millerb6b2fed2008-07-21 09:48:06 -07001694static u32 simple_tx_hashrnd;
1695static int simple_tx_hashrnd_initialized = 0;
1696
David S. Miller8f0f2222008-07-15 03:47:03 -07001697static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
1698{
David S. Millerb6b2fed2008-07-21 09:48:06 -07001699 u32 addr1, addr2, ports;
1700 u32 hash, ihl;
David S. Miller8f0f2222008-07-15 03:47:03 -07001701 u8 ip_proto;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001702
1703 if (unlikely(!simple_tx_hashrnd_initialized)) {
1704 get_random_bytes(&simple_tx_hashrnd, 4);
1705 simple_tx_hashrnd_initialized = 1;
1706 }
David S. Miller8f0f2222008-07-15 03:47:03 -07001707
1708 switch (skb->protocol) {
Arnaldo Carvalho de Melo60678042008-09-20 22:20:49 -07001709 case htons(ETH_P_IP):
David S. Miller8f0f2222008-07-15 03:47:03 -07001710 ip_proto = ip_hdr(skb)->protocol;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001711 addr1 = ip_hdr(skb)->saddr;
1712 addr2 = ip_hdr(skb)->daddr;
David S. Miller8f0f2222008-07-15 03:47:03 -07001713 ihl = ip_hdr(skb)->ihl;
David S. Miller8f0f2222008-07-15 03:47:03 -07001714 break;
Arnaldo Carvalho de Melo60678042008-09-20 22:20:49 -07001715 case htons(ETH_P_IPV6):
David S. Miller8f0f2222008-07-15 03:47:03 -07001716 ip_proto = ipv6_hdr(skb)->nexthdr;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001717 addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
1718 addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
David S. Miller8f0f2222008-07-15 03:47:03 -07001719 ihl = (40 >> 2);
David S. Miller8f0f2222008-07-15 03:47:03 -07001720 break;
1721 default:
1722 return 0;
1723 }
1724
David S. Miller8f0f2222008-07-15 03:47:03 -07001725
1726 switch (ip_proto) {
1727 case IPPROTO_TCP:
1728 case IPPROTO_UDP:
1729 case IPPROTO_DCCP:
1730 case IPPROTO_ESP:
1731 case IPPROTO_AH:
1732 case IPPROTO_SCTP:
1733 case IPPROTO_UDPLITE:
David S. Millerb6b2fed2008-07-21 09:48:06 -07001734 ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
David S. Miller8f0f2222008-07-15 03:47:03 -07001735 break;
1736
1737 default:
David S. Millerb6b2fed2008-07-21 09:48:06 -07001738 ports = 0;
David S. Miller8f0f2222008-07-15 03:47:03 -07001739 break;
1740 }
1741
David S. Millerb6b2fed2008-07-21 09:48:06 -07001742 hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
1743
1744 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001745}
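
/*
 * The final line above maps the 32-bit hash uniformly onto the queue
 * range without a modulo: ((u64)hash * n) >> 32 always lands in
 * [0, n). For example, with real_num_tx_queues == 8 a hash of
 * 0x40000000 (a quarter of the 32-bit space) selects queue 2.
 */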
1746
David S. Millere8a04642008-07-17 00:34:19 -07001747static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1748 struct sk_buff *skb)
1749{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001750 u16 queue_index = 0;
1751
David S. Millereae792b2008-07-15 03:03:33 -07001752 if (dev->select_queue)
1753 queue_index = dev->select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001754 else if (dev->real_num_tx_queues > 1)
1755 queue_index = simple_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001756
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001757 skb_set_queue_mapping(skb, queue_index);
1758 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001759}
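
/*
 * Illustrative sketch (not part of this file): a multiqueue driver can
 * override the hash-based default by installing its own ->select_queue
 * hook before register_netdev(). foo_select_queue() and its policy are
 * hypothetical; it must return a value below real_num_tx_queues.
 *
 *	static u16 foo_select_queue(struct net_device *dev,
 *				    struct sk_buff *skb)
 *	{
 *		return skb->priority ? 1 : 0;
 *	}
 *
 *	dev->select_queue = foo_select_queue;
 */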
1760
Dave Jonesd29f7492008-07-22 14:09:06 -07001761/**
1762 * dev_queue_xmit - transmit a buffer
1763 * @skb: buffer to transmit
1764 *
1765 * Queue a buffer for transmission to a network device. The caller must
1766 * have set the device and priority and built the buffer before calling
1767 * this function. The function can be called from an interrupt.
1768 *
1769 * A negative errno code is returned on a failure. A success does not
1770 * guarantee the frame will be transmitted as it may be dropped due
1771 * to congestion or traffic shaping.
1772 *
1773 * -----------------------------------------------------------------------------------
1774 * I notice this method can also return errors from the queue disciplines,
1775 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1776 * be positive.
1777 *
1778 * Regardless of the return value, the skb is consumed, so it is currently
1779 * difficult to retry a failed send. (You can bump the refcount
1780 * before sending to hold a reference for a retry if you are careful.)
1781 *
1782 * When calling this method, interrupts MUST be enabled. This is because
1783 * the BH enable code must have IRQs enabled so that it will not deadlock.
1784 * --BLG
1785 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786int dev_queue_xmit(struct sk_buff *skb)
1787{
1788 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001789 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 struct Qdisc *q;
1791 int rc = -ENOMEM;
1792
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001793 /* GSO will handle the following emulations directly. */
1794 if (netif_needs_gso(dev, skb))
1795 goto gso;
1796
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 if (skb_shinfo(skb)->frag_list &&
1798 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001799 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 goto out_kfree_skb;
1801
1802 /* Fragmented skb is linearized if device does not support SG,
1803 * or if at least one of fragments is in highmem and device
1804 * does not support DMA from it.
1805 */
1806 if (skb_shinfo(skb)->nr_frags &&
1807 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001808 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 goto out_kfree_skb;
1810
1811 /* If packet is not checksummed and device does not support
1812 * checksumming for this protocol, complete checksumming here.
1813 */
Herbert Xu663ead32007-04-09 11:59:07 -07001814 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1815 skb_set_transport_header(skb, skb->csum_start -
1816 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001817 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1818 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001819 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001821gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001822 /* Disable soft irqs for various locks below. Also
1823 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001825 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
David S. Millereae792b2008-07-15 03:03:33 -07001827 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001828 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001829
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830#ifdef CONFIG_NET_CLS_ACT
1831 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1832#endif
1833 if (q->enqueue) {
David S. Miller5fb66222008-08-02 20:02:43 -07001834 spinlock_t *root_lock = qdisc_lock(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835
David S. Miller37437bb2008-07-16 02:15:04 -07001836 spin_lock(root_lock);
1837
David S. Millera9312ae2008-08-17 21:51:03 -07001838 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
David S. Miller96d20312008-08-17 23:37:16 -07001839 kfree_skb(skb);
David S. Millera9312ae2008-08-17 21:51:03 -07001840 rc = NET_XMIT_DROP;
David S. Miller96d20312008-08-17 23:37:16 -07001841 } else {
1842 rc = qdisc_enqueue_root(skb, q);
1843 qdisc_run(q);
David S. Millera9312ae2008-08-17 21:51:03 -07001844 }
David S. Miller37437bb2008-07-16 02:15:04 -07001845 spin_unlock(root_lock);
1846
David S. Miller37437bb2008-07-16 02:15:04 -07001847 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 }
1849
1850 /* The device has no queue. Common case for software devices:
1851 loopback, all sorts of tunnels...
1852
Herbert Xu932ff272006-06-09 12:20:56 -07001853 Really, it is unlikely that netif_tx_lock protection is necessary
1854 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 counters.)
1856 However, it is possible that they rely on the protection
1857 we provide here.
1858
1859 So check this and take the lock anyway; it is not prone to deadlocks.
1860 Alternatively, shoot the noqueue qdisc; that is even simpler 8)
1861 */
1862 if (dev->flags & IFF_UP) {
1863 int cpu = smp_processor_id(); /* ok because BHs are off */
1864
David S. Millerc773e842008-07-08 23:13:53 -07001865 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866
David S. Millerc773e842008-07-08 23:13:53 -07001867 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001869 if (!netif_tx_queue_stopped(txq)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 rc = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001871 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001872 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 goto out;
1874 }
1875 }
David S. Millerc773e842008-07-08 23:13:53 -07001876 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 if (net_ratelimit())
1878 printk(KERN_CRIT "Virtual device %s asks to "
1879 "queue packet!\n", dev->name);
1880 } else {
1881 /* Recursion is detected! It is possible,
1882 * unfortunately */
1883 if (net_ratelimit())
1884 printk(KERN_CRIT "Dead loop on virtual device "
1885 "%s, fix it urgently!\n", dev->name);
1886 }
1887 }
1888
1889 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001890 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891
1892out_kfree_skb:
1893 kfree_skb(skb);
1894 return rc;
1895out:
Herbert Xud4828d82006-06-22 02:28:18 -07001896 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 return rc;
1898}
1899
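/*
 * Illustrative sketch (not part of this file): a typical in-kernel
 * sender fills in skb->dev and the protocol, then hands the buffer
 * off. The return value may be a negative errno or a positive
 * NET_XMIT_* code, and the skb must not be touched afterwards either
 * way.
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	rc = dev_queue_xmit(skb);
 *	if (rc)
 *		...count the drop; the skb is already consumed...
 */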
1900
1901/*=======================================================================
1902 Receiver routines
1903 =======================================================================*/
1904
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001905int netdev_max_backlog __read_mostly = 1000;
1906int netdev_budget __read_mostly = 300;
1907int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908
1909DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1910
1911
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912/**
1913 * netif_rx - post buffer to the network code
1914 * @skb: buffer to post
1915 *
1916 * This function receives a packet from a device driver and queues it for
1917 * the upper (protocol) levels to process. It always succeeds. The buffer
1918 * may be dropped during processing for congestion control or by the
1919 * protocol layers.
1920 *
1921 * return values:
1922 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 * NET_RX_DROP (packet was dropped)
1924 *
1925 */
1926
1927int netif_rx(struct sk_buff *skb)
1928{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 struct softnet_data *queue;
1930 unsigned long flags;
1931
1932 /* if netpoll wants it, pretend we never saw it */
1933 if (netpoll_rx(skb))
1934 return NET_RX_DROP;
1935
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001936 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001937 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938
1939 /*
1940 * The code is rearranged so that the path is the shortest
1941 * when the CPU is congested but still operating.
1942 */
1943 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 queue = &__get_cpu_var(softnet_data);
1945
1946 __get_cpu_var(netdev_rx_stat).total++;
1947 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1948 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001952 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 }
1954
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001955 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 goto enqueue;
1957 }
1958
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 __get_cpu_var(netdev_rx_stat).dropped++;
1960 local_irq_restore(flags);
1961
1962 kfree_skb(skb);
1963 return NET_RX_DROP;
1964}
1965
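/*
 * Illustrative sketch (not part of this file): a classic (non-NAPI)
 * driver calls netif_rx() from its receive interrupt once the skb has
 * been filled in. foo_rx_interrupt() and pkt_len are hypothetical.
 *
 *	static void foo_rx_interrupt(struct net_device *dev, int pkt_len)
 *	{
 *		struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);
 *
 *		if (!skb)
 *			return;
 *		skb_reserve(skb, 2);	so the IP header is aligned
 *		...memcpy(skb_put(skb, pkt_len), data, pkt_len)...
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */
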
1966int netif_rx_ni(struct sk_buff *skb)
1967{
1968 int err;
1969
1970 preempt_disable();
1971 err = netif_rx(skb);
1972 if (local_softirq_pending())
1973 do_softirq();
1974 preempt_enable();
1975
1976 return err;
1977}
1978
1979EXPORT_SYMBOL(netif_rx_ni);
1980
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981static void net_tx_action(struct softirq_action *h)
1982{
1983 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1984
1985 if (sd->completion_queue) {
1986 struct sk_buff *clist;
1987
1988 local_irq_disable();
1989 clist = sd->completion_queue;
1990 sd->completion_queue = NULL;
1991 local_irq_enable();
1992
1993 while (clist) {
1994 struct sk_buff *skb = clist;
1995 clist = clist->next;
1996
Ilpo Järvinen547b7922008-07-25 21:43:18 -07001997 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 __kfree_skb(skb);
1999 }
2000 }
2001
2002 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002003 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
2005 local_irq_disable();
2006 head = sd->output_queue;
2007 sd->output_queue = NULL;
2008 local_irq_enable();
2009
2010 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002011 struct Qdisc *q = head;
2012 spinlock_t *root_lock;
2013
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 head = head->next_sched;
2015
David S. Miller5fb66222008-08-02 20:02:43 -07002016 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002017 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002018 smp_mb__before_clear_bit();
2019 clear_bit(__QDISC_STATE_SCHED,
2020 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002021 qdisc_run(q);
2022 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002024 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002025 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002026 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002027 } else {
2028 smp_mb__before_clear_bit();
2029 clear_bit(__QDISC_STATE_SCHED,
2030 &q->state);
2031 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 }
2033 }
2034 }
2035}
2036
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002037static inline int deliver_skb(struct sk_buff *skb,
2038 struct packet_type *pt_prev,
2039 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040{
2041 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002042 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043}
2044
2045#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
Stephen Hemminger6229e362007-03-21 13:38:47 -07002046/* These hooks defined here for ATM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047struct net_bridge;
2048struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
2049 unsigned char *addr);
Stephen Hemminger6229e362007-03-21 13:38:47 -07002050void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051
Stephen Hemminger6229e362007-03-21 13:38:47 -07002052/*
2053 * If the bridge module is loaded, call the bridging hook.
2054 * Returns NULL if the packet was consumed.
2055 */
2056struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2057 struct sk_buff *skb) __read_mostly;
2058static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2059 struct packet_type **pt_prev, int *ret,
2060 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061{
2062 struct net_bridge_port *port;
2063
Stephen Hemminger6229e362007-03-21 13:38:47 -07002064 if (skb->pkt_type == PACKET_LOOPBACK ||
2065 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2066 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067
2068 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002069 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002071 }
2072
Stephen Hemminger6229e362007-03-21 13:38:47 -07002073 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074}
2075#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002076#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077#endif
2078
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002079#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2080struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2081EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2082
2083static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2084 struct packet_type **pt_prev,
2085 int *ret,
2086 struct net_device *orig_dev)
2087{
2088 if (skb->dev->macvlan_port == NULL)
2089 return skb;
2090
2091 if (*pt_prev) {
2092 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2093 *pt_prev = NULL;
2094 }
2095 return macvlan_handle_frame_hook(skb);
2096}
2097#else
2098#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2099#endif
2100
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101#ifdef CONFIG_NET_CLS_ACT
2102/* TODO: Maybe we should just force sch_ingress to be compiled in
2103 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for a useless compare
2104 * and 2 extra stores right now if we don't have it on
2105 * but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002106 * NOTE: This doesn't remove any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 * the ingress scheduler, you just can't add policies on ingress.
2108 *
2109 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002110static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002113 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002114 struct netdev_queue *rxq;
2115 int result = TC_ACT_OK;
2116 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002117
Herbert Xuf697c3e2007-10-14 00:38:47 -07002118 if (MAX_RED_LOOP < ttl++) {
2119 printk(KERN_WARNING
2120 "Redir loop detected Dropping packet (%d->%d)\n",
2121 skb->iif, dev->ifindex);
2122 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 }
2124
Herbert Xuf697c3e2007-10-14 00:38:47 -07002125 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2126 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2127
David S. Miller555353c2008-07-08 17:33:13 -07002128 rxq = &dev->rx_queue;
2129
David S. Miller83874002008-07-17 00:53:03 -07002130 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002131 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002132 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002133 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2134 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002135 spin_unlock(qdisc_lock(q));
2136 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002137
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 return result;
2139}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002140
2141static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2142 struct packet_type **pt_prev,
2143 int *ret, struct net_device *orig_dev)
2144{
David S. Miller8d50b532008-07-30 02:37:46 -07002145 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002146 goto out;
2147
2148 if (*pt_prev) {
2149 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2150 *pt_prev = NULL;
2151 } else {
2152 /* Huh? Why does turning on AF_PACKET affect this? */
2153 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2154 }
2155
2156 switch (ing_filter(skb)) {
2157 case TC_ACT_SHOT:
2158 case TC_ACT_STOLEN:
2159 kfree_skb(skb);
2160 return NULL;
2161 }
2162
2163out:
2164 skb->tc_verd = 0;
2165 return skb;
2166}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167#endif
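
/*
 * The ingress path above only runs once an ingress qdisc has been
 * attached, which is normally done from user space, e.g. (an
 * illustrative sketch, not the only way):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip u32 \
 *		match u32 0 0 police rate 1mbit burst 10k drop
 *
 * Until then dev->rx_queue.qdisc is &noop_qdisc and handle_ing()
 * falls straight through.
 */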
2168
Patrick McHardybc1d0412008-07-14 22:49:30 -07002169/*
2170 * netif_nit_deliver - deliver received packets to network taps
2171 * @skb: buffer
2172 *
2173 * This function is used to deliver incoming packets to network
2174 * taps. It should be used when the normal netif_receive_skb path
2175 * is bypassed, for example because of VLAN acceleration.
2176 */
2177void netif_nit_deliver(struct sk_buff *skb)
2178{
2179 struct packet_type *ptype;
2180
2181 if (list_empty(&ptype_all))
2182 return;
2183
2184 skb_reset_network_header(skb);
2185 skb_reset_transport_header(skb);
2186 skb->mac_len = skb->network_header - skb->mac_header;
2187
2188 rcu_read_lock();
2189 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2190 if (!ptype->dev || ptype->dev == skb->dev)
2191 deliver_skb(skb, ptype, skb->dev);
2192 }
2193 rcu_read_unlock();
2194}
2195
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002196/**
2197 * netif_receive_skb - process receive buffer from network
2198 * @skb: buffer to process
2199 *
2200 * netif_receive_skb() is the main receive data processing function.
2201 * It always succeeds. The buffer may be dropped during processing
2202 * for congestion control or by the protocol layers.
2203 *
2204 * This function may only be called from softirq context and interrupts
2205 * should be enabled.
2206 *
2207 * Return values (usually ignored):
2208 * NET_RX_SUCCESS: no congestion
2209 * NET_RX_DROP: packet was dropped
2210 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211int netif_receive_skb(struct sk_buff *skb)
2212{
2213 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002214 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002215 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002217 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218
2219 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002220 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 return NET_RX_DROP;
2222
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002223 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002224 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225
Patrick McHardyc01003c2007-03-29 11:46:52 -07002226 if (!skb->iif)
2227 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002228
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002229 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002230 orig_dev = skb->dev;
2231 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002232 if (skb_bond_should_drop(skb))
2233 null_or_orig = orig_dev; /* deliver only exact match */
2234 else
2235 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002236 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002237
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 __get_cpu_var(netdev_rx_stat).total++;
2239
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002240 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002241 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002242 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243
2244 pt_prev = NULL;
2245
2246 rcu_read_lock();
2247
Eric W. Biedermanb9f75f42008-06-20 22:16:51 -07002248 /* Don't receive packets in an exiting network namespace */
2249 if (!net_alive(dev_net(skb->dev)))
2250 goto out;
2251
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252#ifdef CONFIG_NET_CLS_ACT
2253 if (skb->tc_verd & TC_NCLS) {
2254 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2255 goto ncls;
2256 }
2257#endif
2258
2259 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002260 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2261 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002262 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002263 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 pt_prev = ptype;
2265 }
2266 }
2267
2268#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002269 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2270 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272ncls:
2273#endif
2274
Stephen Hemminger6229e362007-03-21 13:38:47 -07002275 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2276 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002278 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2279 if (!skb)
2280 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281
2282 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002283 list_for_each_entry_rcu(ptype,
2284 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002286 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2287 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002288 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002289 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 pt_prev = ptype;
2291 }
2292 }
2293
2294 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002295 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 } else {
2297 kfree_skb(skb);
2298 /* Jamal, now you will not be able to escape explaining
2299 * to me how you were going to use this. :-)
2300 */
2301 ret = NET_RX_DROP;
2302 }
2303
2304out:
2305 rcu_read_unlock();
2306 return ret;
2307}
2308
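/*
 * Illustrative sketch (not part of this file): a NAPI driver delivers
 * packets from its ->poll() callback, where the softirq-context and
 * interrupts-enabled requirements above both hold. The foo_* names
 * are hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi,
 *						     struct foo_priv, napi);
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = foo_rx_one(priv);
 *
 *			if (!skb)
 *				break;
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget) {
 *			netif_rx_complete(priv->dev, napi);
 *			foo_enable_rx_irq(priv);
 *		}
 *		return work;
 *	}
 */
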
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002309/* Network device is going away, flush any packets still pending */
2310static void flush_backlog(void *arg)
2311{
2312 struct net_device *dev = arg;
2313 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2314 struct sk_buff *skb, *tmp;
2315
2316 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2317 if (skb->dev == dev) {
2318 __skb_unlink(skb, &queue->input_pkt_queue);
2319 kfree_skb(skb);
2320 }
2321}
2322
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002323static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324{
2325 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2327 unsigned long start_time = jiffies;
2328
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002329 napi->weight = weight_p;
2330 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
2333 local_irq_disable();
2334 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002335 if (!skb) {
2336 __napi_complete(napi);
2337 local_irq_enable();
2338 break;
2339 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340 local_irq_enable();
2341
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002343 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002345 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346}
2347
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002348/**
2349 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002350 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002351 *
2352 * The entry's receive function will be scheduled to run
2353 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002354void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002355{
2356 unsigned long flags;
2357
2358 local_irq_save(flags);
2359 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2360 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2361 local_irq_restore(flags);
2362}
2363EXPORT_SYMBOL(__napi_schedule);
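
/*
 * Illustrative sketch (not part of this file): drivers normally get
 * here via netif_rx_schedule() (or napi_schedule()) from the RX
 * interrupt, which tests NAPI_STATE_SCHED before scheduling. The
 * foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *
 *		if (foo_rx_pending(priv)) {
 *			foo_disable_rx_irq(priv);
 *			netif_rx_schedule(priv->dev, &priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */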
2364
2365
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366static void net_rx_action(struct softirq_action *h)
2367{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002368 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 unsigned long start_time = jiffies;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002370 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002371 void *have;
2372
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373 local_irq_disable();
2374
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002375 while (!list_empty(list)) {
2376 struct napi_struct *n;
2377 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002379 /* If the softirq window is exhausted then punt.
2380 *
2381 * Note that this is a slight policy change from the
2382 * previous NAPI code, which would allow up to 2
2383 * jiffies to pass before breaking out. The test
2384 * used to be "jiffies - start_time > 1".
2385 */
2386 if (unlikely(budget <= 0 || jiffies != start_time))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 goto softnet_break;
2388
2389 local_irq_enable();
2390
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002391 /* Even though interrupts have been re-enabled, this
2392 * access is safe because interrupts can only add new
2393 * entries to the tail of this list, and only ->poll()
2394 * calls can remove this head entry from the list.
2395 */
2396 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002398 have = netpoll_poll_lock(n);
2399
2400 weight = n->weight;
2401
David S. Miller0a7606c2007-10-29 21:28:47 -07002402 /* This NAPI_STATE_SCHED test is for avoiding a race
2403 * with netpoll's poll_napi(). Only the entity which
2404 * obtains the lock and sees NAPI_STATE_SCHED set will
2405 * actually make the ->poll() call. Therefore we avoid
2406 * accidentally calling ->poll() when NAPI is not scheduled.
2407 */
2408 work = 0;
2409 if (test_bit(NAPI_STATE_SCHED, &n->state))
2410 work = n->poll(n, weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002411
2412 WARN_ON_ONCE(work > weight);
2413
2414 budget -= work;
2415
2416 local_irq_disable();
2417
2418 /* Drivers must not modify the NAPI state if they
2419 * consume the entire weight. In such cases this code
2420 * still "owns" the NAPI instance and therefore can
2421 * move the instance around on the list at-will.
2422 */
David S. Millerfed17f32008-01-07 21:00:40 -08002423 if (unlikely(work == weight)) {
2424 if (unlikely(napi_disable_pending(n)))
2425 __napi_complete(n);
2426 else
2427 list_move_tail(&n->poll_list, list);
2428 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002429
2430 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 }
2432out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002433 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002434
Chris Leechdb217332006-06-17 21:24:58 -07002435#ifdef CONFIG_NET_DMA
2436 /*
2437 * There may not be any more sk_buffs coming right now, so push
2438 * any pending DMA copies to hardware
2439 */
Dan Williamsd379b012007-07-09 11:56:42 -07002440 if (!cpus_empty(net_dma.channel_mask)) {
2441 int chan_idx;
Mike Travis0e12f842008-05-12 21:21:13 +02002442 for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
Dan Williamsd379b012007-07-09 11:56:42 -07002443 struct dma_chan *chan = net_dma.channels[chan_idx];
2444 if (chan)
2445 dma_async_memcpy_issue_pending(chan);
2446 }
Chris Leechdb217332006-06-17 21:24:58 -07002447 }
2448#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002449
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 return;
2451
2452softnet_break:
2453 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2454 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2455 goto out;
2456}
2457
2458static gifconf_func_t *gifconf_list[NPROTO];
2459
2460/**
2461 * register_gifconf - register a SIOCGIF handler
2462 * @family: Address family
2463 * @gifconf: Function handler
2464 *
2465 * Register protocol dependent address dumping routines. The handler
2466 * that is passed must not be freed or reused until it has been replaced
2467 * by another handler.
2468 */
2469int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2470{
2471 if (family >= NPROTO)
2472 return -EINVAL;
2473 gifconf_list[family] = gifconf;
2474 return 0;
2475}
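
/*
 * IPv4, for instance, registers its handler at boot time from
 * devinet.c with register_gifconf(PF_INET, inet_gifconf); the handler
 * is then invoked once per device from dev_ifconf() below.
 */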
2476
2477
2478/*
2479 * Map an interface index to its name (SIOCGIFNAME)
2480 */
2481
2482/*
2483 * We need this ioctl for efficient implementation of the
2484 * if_indextoname() function required by the IPv6 API. Without
2485 * it, we would have to search all the interfaces to find a
2486 * match. --pb
2487 */
2488
Eric W. Biederman881d9662007-09-17 11:56:21 -07002489static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490{
2491 struct net_device *dev;
2492 struct ifreq ifr;
2493
2494 /*
2495 * Fetch the caller's info block.
2496 */
2497
2498 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2499 return -EFAULT;
2500
2501 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002502 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 if (!dev) {
2504 read_unlock(&dev_base_lock);
2505 return -ENODEV;
2506 }
2507
2508 strcpy(ifr.ifr_name, dev->name);
2509 read_unlock(&dev_base_lock);
2510
2511 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2512 return -EFAULT;
2513 return 0;
2514}
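
/*
 * Illustrative user-space sketch (not part of this file) of the
 * SIOCGIFNAME ioctl implemented above:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <unistd.h>
 *
 *	int print_ifname(int ifindex)
 *	{
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_ifindex = ifindex;
 *		if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *			printf("%d -> %s\n", ifindex, ifr.ifr_name);
 *		close(fd);
 *		return 0;
 *	}
 */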
2515
2516/*
2517 * Perform a SIOCGIFCONF call. This structure will change
2518 * size eventually, and there is nothing I can do about it.
2519 * Thus we will need a 'compatibility mode'.
2520 */
2521
Eric W. Biederman881d9662007-09-17 11:56:21 -07002522static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523{
2524 struct ifconf ifc;
2525 struct net_device *dev;
2526 char __user *pos;
2527 int len;
2528 int total;
2529 int i;
2530
2531 /*
2532 * Fetch the caller's info block.
2533 */
2534
2535 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2536 return -EFAULT;
2537
2538 pos = ifc.ifc_buf;
2539 len = ifc.ifc_len;
2540
2541 /*
2542 * Loop over the interfaces, and write an info block for each.
2543 */
2544
2545 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002546 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 for (i = 0; i < NPROTO; i++) {
2548 if (gifconf_list[i]) {
2549 int done;
2550 if (!pos)
2551 done = gifconf_list[i](dev, NULL, 0);
2552 else
2553 done = gifconf_list[i](dev, pos + total,
2554 len - total);
2555 if (done < 0)
2556 return -EFAULT;
2557 total += done;
2558 }
2559 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002560 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561
2562 /*
2563 * All done. Write the updated control block back to the caller.
2564 */
2565 ifc.ifc_len = total;
2566
2567 /*
2568 * Both BSD and Solaris return 0 here, so we do too.
2569 */
2570 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2571}
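
/*
 * Illustrative user-space sketch (not part of this file) of the
 * SIOCGIFCONF call handled above:
 *
 *	struct ifreq reqs[16];
 *	struct ifconf ifc;
 *	int i, fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	ifc.ifc_len = sizeof(reqs);
 *	ifc.ifc_req = reqs;
 *	if (fd >= 0 && ioctl(fd, SIOCGIFCONF, &ifc) == 0)
 *		for (i = 0; i < ifc.ifc_len / sizeof(struct ifreq); i++)
 *			printf("%s\n", reqs[i].ifr_name);
 */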
2572
2573#ifdef CONFIG_PROC_FS
2574/*
2575 * This is invoked by the /proc filesystem handler to display a device
2576 * in detail.
2577 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002579 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580{
Denis V. Luneve372c412007-11-19 22:31:54 -08002581 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002582 loff_t off;
2583 struct net_device *dev;
2584
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002586 if (!*pos)
2587 return SEQ_START_TOKEN;
2588
2589 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002590 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002591 if (off++ == *pos)
2592 return dev;
2593
2594 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595}
2596
2597void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2598{
Denis V. Luneve372c412007-11-19 22:31:54 -08002599 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002601 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002602 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603}
2604
2605void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002606 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607{
2608 read_unlock(&dev_base_lock);
2609}
2610
2611static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2612{
Rusty Russellc45d2862007-03-28 14:29:08 -07002613 struct net_device_stats *stats = dev->get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
Rusty Russell5a1b5892007-04-28 21:04:03 -07002615 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2616 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2617 dev->name, stats->rx_bytes, stats->rx_packets,
2618 stats->rx_errors,
2619 stats->rx_dropped + stats->rx_missed_errors,
2620 stats->rx_fifo_errors,
2621 stats->rx_length_errors + stats->rx_over_errors +
2622 stats->rx_crc_errors + stats->rx_frame_errors,
2623 stats->rx_compressed, stats->multicast,
2624 stats->tx_bytes, stats->tx_packets,
2625 stats->tx_errors, stats->tx_dropped,
2626 stats->tx_fifo_errors, stats->collisions,
2627 stats->tx_carrier_errors +
2628 stats->tx_aborted_errors +
2629 stats->tx_window_errors +
2630 stats->tx_heartbeat_errors,
2631 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632}
2633
2634/*
2635 * Called from the PROCfs module. This now uses the new arbitrary sized
2636 * /proc/net interface to create /proc/net/dev
2637 */
2638static int dev_seq_show(struct seq_file *seq, void *v)
2639{
2640 if (v == SEQ_START_TOKEN)
2641 seq_puts(seq, "Inter-| Receive "
2642 " | Transmit\n"
2643 " face |bytes packets errs drop fifo frame "
2644 "compressed multicast|bytes packets errs "
2645 "drop fifo colls carrier compressed\n");
2646 else
2647 dev_seq_printf_stats(seq, v);
2648 return 0;
2649}
2650
2651static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2652{
2653 struct netif_rx_stats *rc = NULL;
2654
Mike Travis0c0b0ac2008-05-02 16:43:08 -07002655 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002656 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 rc = &per_cpu(netdev_rx_stat, *pos);
2658 break;
2659 } else
2660 ++*pos;
2661 return rc;
2662}
2663
2664static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2665{
2666 return softnet_get_online(pos);
2667}
2668
2669static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2670{
2671 ++*pos;
2672 return softnet_get_online(pos);
2673}
2674
2675static void softnet_seq_stop(struct seq_file *seq, void *v)
2676{
2677}
2678
2679static int softnet_seq_show(struct seq_file *seq, void *v)
2680{
2681 struct netif_rx_stats *s = v;
2682
2683 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07002684 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07002685 0, 0, 0, 0, /* was fastroute */
2686 s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 return 0;
2688}
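
/*
 * Each line of /proc/net/softnet_stat therefore shows, per online CPU
 * and in hex: total packets processed, packets dropped because the
 * backlog was full, time_squeeze (net_rx_action ran out of budget or
 * time), five zero placeholders (one unused field plus the old
 * fastroute fields), and cpu_collision.
 */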
2689
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002690static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 .start = dev_seq_start,
2692 .next = dev_seq_next,
2693 .stop = dev_seq_stop,
2694 .show = dev_seq_show,
2695};
2696
2697static int dev_seq_open(struct inode *inode, struct file *file)
2698{
Denis V. Luneve372c412007-11-19 22:31:54 -08002699 return seq_open_net(inode, file, &dev_seq_ops,
2700 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701}
2702
Arjan van de Ven9a321442007-02-12 00:55:35 -08002703static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 .owner = THIS_MODULE,
2705 .open = dev_seq_open,
2706 .read = seq_read,
2707 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08002708 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709};
2710
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002711static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 .start = softnet_seq_start,
2713 .next = softnet_seq_next,
2714 .stop = softnet_seq_stop,
2715 .show = softnet_seq_show,
2716};
2717
2718static int softnet_seq_open(struct inode *inode, struct file *file)
2719{
2720 return seq_open(file, &softnet_seq_ops);
2721}
2722
Arjan van de Ven9a321442007-02-12 00:55:35 -08002723static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 .owner = THIS_MODULE,
2725 .open = softnet_seq_open,
2726 .read = seq_read,
2727 .llseek = seq_lseek,
2728 .release = seq_release,
2729};
2730
static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void ptype_seq_decode(struct seq_file *seq, void *sym)
{
#ifdef CONFIG_KALLSYMS
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char namebuf[128];

	symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
				  &modname, namebuf);

	if (symname) {
		char *delim = ":";

		if (!modname)
			modname = delim = "";
		seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
			   symname, offset);
		return;
	}
#endif

	seq_printf(seq, "[%p]", sym);
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s ",
			   pt->dev ? pt->dev->name : "");
		ptype_seq_decode(seq, pt->func);
		seq_putc(seq, '\n');
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ptype_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ptype_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
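
/*
 * For illustration only: reading /proc/net/ptype through the seq_file
 * operations above yields output shaped roughly like the following
 * (the symbols and device names here are made up, not from this file):
 *
 *	Type Device      Function
 *	ALL  eth0        packet_rcv+0x0
 *	0800             ip_rcv+0x0
 *	0806             arp_rcv+0x0
 */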

static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	proc_net_remove(net, "ptype");
out_softnet:
	proc_net_remove(net, "softnet_stat");
out_dev:
	proc_net_remove(net, "dev");
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	proc_net_remove(net, "ptype");
	proc_net_remove(net, "softnet_stat");
	proc_net_remove(net, "dev");
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int __init dev_proc_init(void)
{
	return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */


/**
 *	netdev_set_master - set up master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 *	function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	synchronize_net();

	if (old)
		dev_put(old);

	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
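
/*
 * Usage sketch (hypothetical, not called anywhere in this file): this
 * is how a bonding-style driver would enslave a device with
 * netdev_set_master(), all under the RTNL as the kernel-doc requires.
 */
static int __maybe_unused example_enslave(struct net_device *bond,
					  struct net_device *slave)
{
	int err;

	rtnl_lock();
	err = netdev_set_master(slave, bond);	/* pair with (slave, NULL) */
	rtnl_unlock();
	return err;
}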

static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, leave the promiscuity counter
		 * untouched and return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			printk(KERN_WARNING "%s: promiscuity counter overflowed, "
				"set promiscuity failed; the promiscuity feature "
				"of the device may be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
							       "left");
		if (audit_enabled)
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_ANOM_PROMISCUOUS,
				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				dev->name, (dev->flags & IFF_PROMISC),
				(old_flags & IFF_PROMISC),
				audit_get_loginuid(current),
				current->uid, current->gid,
				audit_get_sessionid(current));

		if (dev->change_rx_flags)
			dev->change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}

/**
 *	dev_set_promiscuity - update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
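
/*
 * Usage sketch (hypothetical): a packet-capture style module takes a
 * promiscuity reference while capturing and drops it when done; the
 * counter keeps nested users from stepping on each other.
 */
static int __maybe_unused example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* paired with -1 on stop */
	rtnl_unlock();
	return err;
}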

/**
 *	dev_set_allmulti - update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface keeps listening
 *	to all multicast frames. Once it hits zero the device reverts back to
 *	normal filtering operation. A negative @inc value is used to drop the
 *	counter when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, leave the allmulti counter
		 * untouched and return an error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			printk(KERN_WARNING "%s: allmulti counter overflowed, "
				"set allmulti failed; the allmulti feature of "
				"the device may be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		if (dev->change_rx_flags)
			dev->change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
	}
	return 0;
}
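
/*
 * Usage sketch (hypothetical): a multicast routing component holds an
 * allmulti reference for as long as it needs every multicast frame,
 * releasing it later with dev_set_allmulti(dev, -1).
 */
static int __maybe_unused example_allmulti_hold(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_allmulti(dev, 1);
	rtnl_unlock();
	return err;
}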

/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (dev->set_rx_mode)
		dev->set_rx_mode(dev);
	else {
		/* Changes to unicast addresses may only happen under the
		 * rtnl, therefore calling __dev_set_promiscuity here is safe.
		 */
		if (dev->uc_count > 0 && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = 1;
		} else if (dev->uc_count == 0 && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = 0;
		}

		if (dev->set_multicast_list)
			dev->set_multicast_list(dev);
	}
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

int __dev_addr_delete(struct dev_addr_list **list, int *count,
		      void *addr, int alen, int glbl)
{
	struct dev_addr_list *da;

	for (; (da = *list) != NULL; list = &da->next) {
		if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
		    alen == da->da_addrlen) {
			if (glbl) {
				int old_glbl = da->da_gusers;
				da->da_gusers = 0;
				if (old_glbl == 0)
					break;
			}
			if (--da->da_users)
				return 0;

			*list = da->next;
			kfree(da);
			(*count)--;
			return 0;
		}
	}
	return -ENOENT;
}

int __dev_addr_add(struct dev_addr_list **list, int *count,
		   void *addr, int alen, int glbl)
{
	struct dev_addr_list *da;

	for (da = *list; da != NULL; da = da->next) {
		if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
		    da->da_addrlen == alen) {
			if (glbl) {
				int old_glbl = da->da_gusers;
				da->da_gusers = 1;
				if (old_glbl)
					return 0;
			}
			da->da_users++;
			return 0;
		}
	}

	da = kzalloc(sizeof(*da), GFP_ATOMIC);
	if (da == NULL)
		return -ENOMEM;
	memcpy(da->da_addr, addr, alen);
	da->da_addrlen = alen;
	da->da_users = 1;
	da->da_gusers = glbl ? 1 : 0;
	da->next = *list;
	*list = da;
	(*count)++;
	return 0;
}

/**
 *	dev_unicast_delete - Release secondary unicast address.
 *	@dev: device
 *	@addr: address to delete
 *	@alen: length of @addr
 *
 *	Release reference to a secondary unicast address and remove it
 *	from the device if the reference count drops to zero.
 *
 *	The caller must hold the rtnl_mutex.
 */
int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
{
	int err;

	ASSERT_RTNL();

	netif_addr_lock_bh(dev);
	err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_unicast_delete);

/**
 *	dev_unicast_add - add a secondary unicast address
 *	@dev: device
 *	@addr: address to add
 *	@alen: length of @addr
 *
 *	Add a secondary unicast address to the device or increase
 *	the reference count if it already exists.
 *
 *	The caller must hold the rtnl_mutex.
 */
int dev_unicast_add(struct net_device *dev, void *addr, int alen)
{
	int err;

	ASSERT_RTNL();

	netif_addr_lock_bh(dev);
	err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_unicast_add);
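
/*
 * Usage sketch (hypothetical): a driver that must receive frames for
 * one extra MAC address pins it as a secondary unicast address. The
 * address bytes below are illustrative (a locally administered MAC).
 */
static int __maybe_unused example_listen_extra_mac(struct net_device *dev)
{
	static u8 extra_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	int err;

	rtnl_lock();
	err = dev_unicast_add(dev, extra_mac, ETH_ALEN);
	rtnl_unlock();
	return err;
}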

int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
		    struct dev_addr_list **from, int *from_count)
{
	struct dev_addr_list *da, *next;
	int err = 0;

	da = *from;
	while (da != NULL) {
		next = da->next;
		if (!da->da_synced) {
			err = __dev_addr_add(to, to_count,
					     da->da_addr, da->da_addrlen, 0);
			if (err < 0)
				break;
			da->da_synced = 1;
			da->da_users++;
		} else if (da->da_users == 1) {
			__dev_addr_delete(to, to_count,
					  da->da_addr, da->da_addrlen, 0);
			__dev_addr_delete(from, from_count,
					  da->da_addr, da->da_addrlen, 0);
		}
		da = next;
	}
	return err;
}

void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
		       struct dev_addr_list **from, int *from_count)
{
	struct dev_addr_list *da, *next;

	da = *from;
	while (da != NULL) {
		next = da->next;
		if (da->da_synced) {
			__dev_addr_delete(to, to_count,
					  da->da_addr, da->da_addrlen, 0);
			da->da_synced = 0;
			__dev_addr_delete(from, from_count,
					  da->da_addr, da->da_addrlen, 0);
		}
		da = next;
	}
}

/**
 *	dev_unicast_sync - Synchronize device's unicast list to another device
 *	@to: destination device
 *	@from: source device
 *
 *	Add newly added addresses to the destination device and release
 *	addresses that have no users left. The source device must be
 *	locked by netif_addr_lock_bh.
 *
 *	This function is intended to be called from the dev->set_rx_mode
 *	function of layered software devices.
 */
int dev_unicast_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	netif_addr_lock_bh(to);
	err = __dev_addr_sync(&to->uc_list, &to->uc_count,
			      &from->uc_list, &from->uc_count);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock_bh(to);
	return err;
}
EXPORT_SYMBOL(dev_unicast_sync);

/**
 *	dev_unicast_unsync - Remove synchronized addresses from the destination device
 *	@to: destination device
 *	@from: source device
 *
 *	Remove all addresses that were added to the destination device by
 *	dev_unicast_sync(). This function is intended to be called from the
 *	dev->stop function of layered software devices.
 */
void dev_unicast_unsync(struct net_device *to, struct net_device *from)
{
	netif_addr_lock_bh(from);
	netif_addr_lock(to);

	__dev_addr_unsync(&to->uc_list, &to->uc_count,
			  &from->uc_list, &from->uc_count);
	__dev_set_rx_mode(to);

	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_unicast_unsync);
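
/*
 * Usage sketch (hypothetical layered device over a lower @real_dev,
 * e.g. a VLAN-like upper device): propagate the upper device's unicast
 * list down from ->set_rx_mode, and tear it down from ->stop. The
 * master pointer is used here purely for illustration.
 */
static void __maybe_unused example_upper_set_rx_mode(struct net_device *dev)
{
	struct net_device *real_dev = dev->master;

	dev_unicast_sync(real_dev, dev);
}

static int __maybe_unused example_upper_stop(struct net_device *dev)
{
	struct net_device *real_dev = dev->master;

	dev_unicast_unsync(real_dev, dev);
	return 0;
}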

static void __dev_addr_discard(struct dev_addr_list **list)
{
	struct dev_addr_list *tmp;

	while (*list != NULL) {
		tmp = *list;
		*list = tmp->next;
		if (tmp->da_users > tmp->da_gusers)
			printk(KERN_ERR "__dev_addr_discard: address leakage! "
			       "da_users=%d\n", tmp->da_users);
		kfree(tmp);
	}
}

static void dev_addr_discard(struct net_device *dev)
{
	netif_addr_lock_bh(dev);

	__dev_addr_discard(&dev->uc_list);
	dev->uc_count = 0;

	__dev_addr_discard(&dev->mc_list);
	dev->mc_count = 0;

	netif_addr_unlock_bh(dev);
}

unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}

int dev_change_flags(struct net_device *dev, unsigned flags)
{
	int ret, changes;
	int old_flags = dev->flags;

	ASSERT_RTNL();

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
		dev->change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 *	Have we downed the interface? We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different ? */
		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);

		if (!ret)
			dev_set_rx_mode(dev);
	}

	if (dev->flags & IFF_UP &&
	    ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
					  IFF_VOLATILE)))
		call_netdevice_notifiers(NETDEV_CHANGE, dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? +1 : -1;
		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC when
	   IFF_ALLMULTI is requested, without asking us and without
	   reporting it.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	/* Exclude state transition flags, already notified */
	changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);

	return ret;
}

int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (dev->change_mtu)
		err = dev->change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;
	if (!err && dev->flags & IFF_UP)
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
	return err;
}
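
/*
 * Usage sketch (hypothetical): switch a device to a jumbo MTU from
 * kernel code. 9000 is illustrative; the driver's change_mtu hook may
 * reject it, in which case the error is simply propagated.
 */
static int __maybe_unused example_set_jumbo_mtu(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	return err;
}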

int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	int err;

	if (!dev->set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = dev->set_mac_address(dev, sa);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}

/*
 *	Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
 */
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
		case SIOCGIFFLAGS:	/* Get interface flags */
			ifr->ifr_flags = dev_get_flags(dev);
			return 0;

		case SIOCGIFMETRIC:	/* Get the metric on the interface
					   (currently unused) */
			ifr->ifr_metric = 0;
			return 0;

		case SIOCGIFMTU:	/* Get the MTU of a device */
			ifr->ifr_mtu = dev->mtu;
			return 0;

		case SIOCGIFHWADDR:
			if (!dev->addr_len)
				memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
			else
				memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
				       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
			ifr->ifr_hwaddr.sa_family = dev->type;
			return 0;

		case SIOCGIFSLAVE:
			err = -EINVAL;
			break;

		case SIOCGIFMAP:
			ifr->ifr_map.mem_start = dev->mem_start;
			ifr->ifr_map.mem_end   = dev->mem_end;
			ifr->ifr_map.base_addr = dev->base_addr;
			ifr->ifr_map.irq       = dev->irq;
			ifr->ifr_map.dma       = dev->dma;
			ifr->ifr_map.port      = dev->if_port;
			return 0;

		case SIOCGIFINDEX:
			ifr->ifr_ifindex = dev->ifindex;
			return 0;

		case SIOCGIFTXQLEN:
			ifr->ifr_qlen = dev->tx_queue_len;
			return 0;

		default:
			/* dev_ioctl() should ensure this case
			 * is never reached
			 */
			WARN_ON(1);
			err = -EINVAL;
			break;

	}
	return err;
}

/*
 *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
		case SIOCSIFFLAGS:	/* Set interface flags */
			return dev_change_flags(dev, ifr->ifr_flags);

		case SIOCSIFMETRIC:	/* Set the metric on the interface
					   (currently unused) */
			return -EOPNOTSUPP;

		case SIOCSIFMTU:	/* Set the MTU of a device */
			return dev_set_mtu(dev, ifr->ifr_mtu);

		case SIOCSIFHWADDR:
			return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

		case SIOCSIFHWBROADCAST:
			if (ifr->ifr_hwaddr.sa_family != dev->type)
				return -EINVAL;
			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
			call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
			return 0;

		case SIOCSIFMAP:
			if (dev->set_config) {
				if (!netif_device_present(dev))
					return -ENODEV;
				return dev->set_config(dev, &ifr->ifr_map);
			}
			return -EOPNOTSUPP;

		case SIOCADDMULTI:
			if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
					  dev->addr_len, 1);

		case SIOCDELMULTI:
			if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
				return -EINVAL;
			if (!netif_device_present(dev))
				return -ENODEV;
			return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
					     dev->addr_len, 1);

		case SIOCSIFTXQLEN:
			if (ifr->ifr_qlen < 0)
				return -EINVAL;
			dev->tx_queue_len = ifr->ifr_qlen;
			return 0;

		case SIOCSIFNAME:
			ifr->ifr_newname[IFNAMSIZ-1] = '\0';
			return dev_change_name(dev, ifr->ifr_newname);

		/*
		 *	Unknown or private ioctl
		 */

		default:
			if ((cmd >= SIOCDEVPRIVATE &&
			    cmd <= SIOCDEVPRIVATE + 15) ||
			    cmd == SIOCBONDENSLAVE ||
			    cmd == SIOCBONDRELEASE ||
			    cmd == SIOCBONDSETHWADDR ||
			    cmd == SIOCBONDSLAVEINFOQUERY ||
			    cmd == SIOCBONDINFOQUERY ||
			    cmd == SIOCBONDCHANGEACTIVE ||
			    cmd == SIOCGMIIPHY ||
			    cmd == SIOCGMIIREG ||
			    cmd == SIOCSMIIREG ||
			    cmd == SIOCBRADDIF ||
			    cmd == SIOCBRDELIF ||
			    cmd == SIOCWANDEV) {
				err = -EOPNOTSUPP;
				if (dev->do_ioctl) {
					if (netif_device_present(dev))
						err = dev->do_ioctl(dev, ifr,
								    cmd);
					else
						err = -ENODEV;
				}
			} else
				err = -EINVAL;

	}
	return err;
}

/*
 *	This function handles all "interface"-type I/O control requests. The
 *	actual 'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl - network device ioctl
 *	@net: the applicable net namespace
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */

int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes an ifconf argument
	   and requires the RTNL, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf(net, (char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname(net, (struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
		/*
		 *	These ioctl calls:
		 *	- can be done by all.
		 *	- atomic and do not require locking.
		 *	- return a value
		 */
		case SIOCGIFFLAGS:
		case SIOCGIFMETRIC:
		case SIOCGIFMTU:
		case SIOCGIFHWADDR:
		case SIOCGIFSLAVE:
		case SIOCGIFMAP:
		case SIOCGIFINDEX:
		case SIOCGIFTXQLEN:
			dev_load(net, ifr.ifr_name);
			read_lock(&dev_base_lock);
			ret = dev_ifsioc_locked(net, &ifr, cmd);
			read_unlock(&dev_base_lock);
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		case SIOCETHTOOL:
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ethtool(net, &ifr);
			rtnl_unlock();
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		/*
		 *	These ioctl calls:
		 *	- require superuser power.
		 *	- require strict serialization.
		 *	- return a value
		 */
		case SIOCGMIIPHY:
		case SIOCGMIIREG:
		case SIOCSIFNAME:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(net, &ifr, cmd);
			rtnl_unlock();
			if (!ret) {
				if (colon)
					*colon = ':';
				if (copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
					ret = -EFAULT;
			}
			return ret;

		/*
		 *	These ioctl calls:
		 *	- require superuser power.
		 *	- require strict serialization.
		 *	- do not return a value
		 */
		case SIOCSIFFLAGS:
		case SIOCSIFMETRIC:
		case SIOCSIFMTU:
		case SIOCSIFMAP:
		case SIOCSIFHWADDR:
		case SIOCSIFSLAVE:
		case SIOCADDMULTI:
		case SIOCDELMULTI:
		case SIOCSIFHWBROADCAST:
		case SIOCSIFTXQLEN:
		case SIOCSMIIREG:
		case SIOCBONDENSLAVE:
		case SIOCBONDRELEASE:
		case SIOCBONDSETHWADDR:
		case SIOCBONDCHANGEACTIVE:
		case SIOCBRADDIF:
		case SIOCBRDELIF:
			if (!capable(CAP_NET_ADMIN))
				return -EPERM;
			/* fall through */
		case SIOCBONDSLAVEINFOQUERY:
		case SIOCBONDINFOQUERY:
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(net, &ifr, cmd);
			rtnl_unlock();
			return ret;

		case SIOCGIFMEM:
			/* Get the per device memory space. We can add this but
			 * currently do not support it */
		case SIOCSIFMEM:
			/* Set the per device memory buffer space.
			 * Not applicable in our case */
		case SIOCSIFLINK:
			return -EINVAL;

		/*
		 *	Unknown or private ioctl.
		 */
		default:
			if (cmd == SIOCWANDEV ||
			    (cmd >= SIOCDEVPRIVATE &&
			     cmd <= SIOCDEVPRIVATE + 15)) {
				dev_load(net, ifr.ifr_name);
				rtnl_lock();
				ret = dev_ifsioc(net, &ifr, cmd);
				rtnl_unlock();
				if (!ret && copy_to_user(arg, &ifr,
							 sizeof(struct ifreq)))
					ret = -EFAULT;
				return ret;
			}
			/* Take care of Wireless Extensions */
			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
				return wext_handle_ioctl(net, &ifr, cmd, arg);
			return -EINVAL;
	}
}
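
/*
 * For reference, the user space side of the SIOCGIFMTU path handled
 * above looks roughly like this (illustrative snippet, compiled
 * against libc rather than as part of the kernel):
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu %d\n", ifr.ifr_mtu);
 */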

/**
 *	dev_new_index - allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number. The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return ifindex;
	}
}

/* Delayed registration/unregistration */
static DEFINE_SPINLOCK(net_todo_list_lock);
static LIST_HEAD(net_todo_list);

static void net_set_todo(struct net_device *dev)
{
	spin_lock(&net_todo_list_lock);
	list_add_tail(&dev->todo_list, &net_todo_list);
	spin_unlock(&net_todo_list_lock);
}

static void rollback_registered(struct net_device *dev)
{
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	/* Some devices call without registering for initialization unwind. */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
				  "was registered\n", dev->name, dev);

		WARN_ON(1);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_REGISTERED);

	/* If device is running, close it first. */
	dev_close(dev);

	/* And unlink it from device chain. */
	unlist_netdevice(dev);

	dev->reg_state = NETREG_UNREGISTERING;

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);


	/* Notify protocols that we are about to destroy
	   this device. They should clean all the things.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_addr_discard(dev);

	if (dev->uninit)
		dev->uninit(dev);

	/* Notifier chain MUST detach us from master device. */
	WARN_ON(dev->master);

	/* Remove entries from kobject tree */
	netdev_unregister_kobject(dev);

	synchronize_net();

	dev_put(dev);
}

static void __netdev_init_queue_locks_one(struct net_device *dev,
					  struct netdev_queue *dev_queue,
					  void *_unused)
{
	spin_lock_init(&dev_queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
	dev_queue->xmit_lock_owner = -1;
}

static void netdev_init_queue_locks(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
	__netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
}

/**
 *	register_netdevice - register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *p;
	int ret;
	struct net *net;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);
	netdev_init_queue_locks(dev);

	dev->iflink = -1;

	/* Init, if this function is available */
	if (dev->init) {
		ret = dev->init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (!dev_valid_name(dev->name)) {
		ret = -EINVAL;
		goto err_uninit;
	}

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Check for existence of name */
	head = dev_name_hash(net, dev->name);
	hlist_for_each(p, head) {
		struct net_device *d
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
			ret = -EEXIST;
			goto err_uninit;
		}
	}

	/* Fix illegal checksum combinations */
	if ((dev->features & NETIF_F_HW_CSUM) &&
	    (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	if ((dev->features & NETIF_F_NO_CSUM) &&
	    (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
	}


	/* Fix illegal SG+CSUM combinations. */
	if ((dev->features & NETIF_F_SG) &&
	    !(dev->features & NETIF_F_ALL_CSUM)) {
		printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
		       dev->name);
		dev->features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((dev->features & NETIF_F_TSO) &&
	    !(dev->features & NETIF_F_SG)) {
		printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
		       dev->name);
		dev->features &= ~NETIF_F_TSO;
	}
	if (dev->features & NETIF_F_UFO) {
		if (!(dev->features & NETIF_F_HW_CSUM)) {
			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
					"NETIF_F_HW_CSUM feature.\n",
					dev->name);
			dev->features &= ~NETIF_F_UFO;
		}
		if (!(dev->features & NETIF_F_SG)) {
			printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
					"NETIF_F_SG feature.\n",
					dev->name);
			dev->features &= ~NETIF_F_UFO;
		}
	}

	/* Enable software GSO if SG is supported. */
	if (dev->features & NETIF_F_SG)
		dev->features |= NETIF_F_GSO;

	netdev_initialize_kobject(dev);
	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	/*
	 *	Default initial state at registration is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}

out:
	return ret;

err_uninit:
	if (dev->uninit)
		dev->uninit(dev);
	goto out;
}

/**
 *	register_netdev - register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl
 *	semaphore and expands the device name if you passed a format string
 *	to alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();

	/*
	 * If the name is a format string the caller wants us to do a
	 * name allocation.
	 */
	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto out;
	}

	err = register_netdevice(dev);
out:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
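
/*
 * Usage sketch (hypothetical driver init): the usual
 * alloc_netdev()/register_netdev()/free_netdev() life cycle that the
 * functions above implement. "example%d" is expanded by the core;
 * ether_setup() is assumed available via <linux/etherdevice.h>.
 */
static int __maybe_unused example_driver_init(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(0, "example%d", ether_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	return err;
}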

/*
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and clean up and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;

	rebroadcast_time = warning_time = jiffies;
	while (atomic_read(&dev->refcnt) != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, atomic_read(&dev->refcnt));
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock() after it drops the semaphore.
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 */
static DEFINE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void)
{
	struct list_head list;

	/* Need to guard against multiple cpu's getting out of order. */
	mutex_lock(&net_todo_run_mutex);

	/* Not safe to do outside the semaphore. We must not return
	 * until all unregister events invoked by the local processor
	 * have been completed (either by this todo run, or one on
	 * another cpu).
	 */
	if (list_empty(&net_todo_list))
		goto out;

	/* Snapshot list, allow later requests */
	spin_lock(&net_todo_list_lock);
	list_replace_init(&net_todo_list, &list);
	spin_unlock(&net_todo_list_lock);

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_entry(list.next, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(atomic_read(&dev->refcnt));
		WARN_ON(dev->ip_ptr);
		WARN_ON(dev->ip6_ptr);
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}

out:
	mutex_unlock(&net_todo_run_mutex);
}

static struct net_device_stats *internal_stats(struct net_device *dev)
{
	return &dev->stats;
}

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue,
				  void *_unused)
{
	queue->dev = dev;
}

static void netdev_init_queues(struct net_device *dev)
{
	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);
}

/**
 *	alloc_netdev_mq - allocate network device
 *	@sizeof_priv: size of private data to allocate space for
 *	@name: device name format string
 *	@setup: callback to initialize device
 *	@queue_count: the number of subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization. Also allocates subqueue structs
 *	for each queue on the device at the end of the netdevice.
 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct netdev_queue *tx;
	struct net_device *dev;
	size_t alloc_size;
	void *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN_CONST;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "tx queues.\n");
		kfree(p);
		return NULL;
	}

	dev = (struct net_device *)
		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
	dev->padded = (char *)dev - (char *)p;
	dev_net_set(dev, &init_net);

	dev->_tx = tx;
	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

	if (sizeof_priv) {
		dev->priv = ((char *)dev +
			     ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
			      & ~NETDEV_ALIGN_CONST));
	}

	dev->gso_max_size = GSO_MAX_SIZE;

	netdev_init_queues(dev);

	dev->get_stats = internal_stats;
	netpoll_netdev_init(dev);
	setup(dev);
	strcpy(dev->name, name);
	return dev;
}
EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326
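/*
 * A minimal usage sketch (not from this file): a hypothetical driver
 * allocates a four-queue device with a private area.  example_priv,
 * example_setup and example_create are illustrative names only;
 * ether_setup() comes from <linux/etherdevice.h>.
 */
struct example_priv {
	int example_flag;			/* hypothetical driver state */
};

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);			/* standard Ethernet defaults */
}

static struct net_device *example_create(void)
{
	/* private area and 4 subqueue structs are carved from one block */
	return alloc_netdev_mq(sizeof(struct example_priv), "example%d",
			       example_setup, 4);
}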
4327/**
4328 * free_netdev - free network device
4329 * @dev: device
4330 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004331 * This function does the last stage of destroying an allocated device
4332 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004333 * If this is the last reference then it will be freed.
4334 */
4335void free_netdev(struct net_device *dev)
4336{
Denis V. Lunevf3005d72008-04-16 02:02:18 -07004337 release_net(dev_net(dev));
4338
David S. Millere8a04642008-07-17 00:34:19 -07004339 kfree(dev->_tx);
4340
Stephen Hemminger3041a062006-05-26 13:25:24 -07004341 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342 if (dev->reg_state == NETREG_UNINITIALIZED) {
4343 kfree((char *)dev - dev->padded);
4344 return;
4345 }
4346
4347 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4348 dev->reg_state = NETREG_RELEASED;
4349
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07004350 /* will free via device release */
4351 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004353
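/*
 * Hedged sketch of the error handling free_netdev() is written for,
 * reusing the hypothetical example_create() above: a device that was
 * allocated but never successfully registered is still released with
 * free_netdev(), which takes the NETREG_UNINITIALIZED shortcut and
 * simply kfree()s the block.
 */
static int example_probe(void)
{
	struct net_device *dev = example_create();
	int err;

	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);	/* reg_state is NETREG_UNINITIALIZED */
		return err;
	}
	return 0;			/* unregister before freeing later */
}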
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354/* Synchronize with packet receive processing. */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004355void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356{
4357 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07004358 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359}
4360
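/*
 * Sketch of the pattern synchronize_net() exists for, with a
 * hypothetical hook pointer: unpublish the pointer, wait for all
 * in-flight receive paths (RCU read-side sections) to drain, and only
 * then tear down whatever the hook was using.
 */
static void (*example_hook)(struct sk_buff *skb);	/* hypothetical */

static void example_unhook(void)
{
	rcu_assign_pointer(example_hook, NULL);		/* unpublish */
	synchronize_net();	/* no CPU can still be running the hook */
	/* now safe to free the hook's private data */
}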
4361/**
4362 * unregister_netdevice - remove device from the kernel
4363 * @dev: device
4364 *
4365 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004366 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367 *
4368 * Callers must hold the rtnl semaphore. You may want
4369 * unregister_netdev() instead of this.
4370 */
4371
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08004372void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373{
Herbert Xua6620712007-12-12 19:21:56 -08004374 ASSERT_RTNL();
4375
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004376 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377 /* Finish processing unregister after unlock */
4378 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004379}
4380
4381/**
4382 * unregister_netdev - remove device from the kernel
4383 * @dev: device
4384 *
4385 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004386 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387 *
4388 * This is just a wrapper for unregister_netdevice that takes
4389 * the rtnl semaphore. In general you want to use this and not
4390 * unregister_netdevice.
4391 */
4392void unregister_netdev(struct net_device *dev)
4393{
4394 rtnl_lock();
4395 unregister_netdevice(dev);
4396 rtnl_unlock();
4397}
4398
4399EXPORT_SYMBOL(unregister_netdev);
4400
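/*
 * Illustrative only: for a single device the wrapper above is the one
 * to use; unregister_netdevice() is the building block when several
 * devices are torn down under one rtnl round-trip.
 */
static void example_teardown(struct net_device *a, struct net_device *b)
{
	rtnl_lock();
	unregister_netdevice(a);	/* caller holds the rtnl semaphore */
	unregister_netdevice(b);	/* batched under the same lock */
	rtnl_unlock();			/* deferred unregister work runs here */
}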
Eric W. Biedermance286d32007-09-12 13:53:49 +02004401/**
 4402 * dev_change_net_namespace - move device to a different network namespace
4403 * @dev: device
4404 * @net: network namespace
 4405 * @pat: If not NULL, name pattern to try if the current device name
4406 * is already taken in the destination network namespace.
4407 *
4408 * This function shuts down a device interface and moves it
4409 * to a new network namespace. On success 0 is returned, on
 4410 * a failure a negative errno code is returned.
4411 *
4412 * Callers must hold the rtnl semaphore.
4413 */
4414
4415int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4416{
4417 char buf[IFNAMSIZ];
4418 const char *destname;
4419 int err;
4420
4421 ASSERT_RTNL();
4422
4423 /* Don't allow namespace local devices to be moved. */
4424 err = -EINVAL;
4425 if (dev->features & NETIF_F_NETNS_LOCAL)
4426 goto out;
4427
 4428	/* Ensure the device has been registered */
4429 err = -EINVAL;
4430 if (dev->reg_state != NETREG_REGISTERED)
4431 goto out;
4432
 4433	/* Get out if there is nothing to do */
4434 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09004435 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02004436 goto out;
4437
4438 /* Pick the destination device name, and ensure
4439 * we can use it in the destination network namespace.
4440 */
4441 err = -EEXIST;
4442 destname = dev->name;
4443 if (__dev_get_by_name(net, destname)) {
4444 /* We get here if we can't use the current device name */
4445 if (!pat)
4446 goto out;
4447 if (!dev_valid_name(pat))
4448 goto out;
4449 if (strchr(pat, '%')) {
4450 if (__dev_alloc_name(net, pat, buf) < 0)
4451 goto out;
4452 destname = buf;
4453 } else
4454 destname = pat;
4455 if (__dev_get_by_name(net, destname))
4456 goto out;
4457 }
4458
4459 /*
 4460	 * And now a mini version of register_netdevice and unregister_netdevice.
4461 */
4462
4463 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07004464 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004465
4466 /* And unlink it from device chain */
4467 err = -ENODEV;
4468 unlist_netdevice(dev);
4469
4470 synchronize_net();
4471
4472 /* Shutdown queueing discipline. */
4473 dev_shutdown(dev);
4474
 4475	/* Notify protocols that we are about to destroy
 4476	   this device. They should clean up all their state.
 4477	 */
4478 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4479
4480 /*
4481 * Flush the unicast and multicast chains
4482 */
4483 dev_addr_discard(dev);
4484
4485 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004486 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004487
4488 /* Assign the new device name */
4489 if (destname != dev->name)
4490 strcpy(dev->name, destname);
4491
4492 /* If there is an ifindex conflict assign a new one */
4493 if (__dev_get_by_index(net, dev->ifindex)) {
4494 int iflink = (dev->iflink == dev->ifindex);
4495 dev->ifindex = dev_new_index(net);
4496 if (iflink)
4497 dev->iflink = dev->ifindex;
4498 }
4499
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004500 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004501 netdev_unregister_kobject(dev);
4502 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004503 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004504
4505 /* Add the device back in the hashes */
4506 list_netdevice(dev);
4507
4508 /* Notify protocols, that a new device appeared. */
4509 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4510
4511 synchronize_net();
4512 err = 0;
4513out:
4514 return err;
4515}
4516
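/*
 * Hypothetical caller sketch: move @dev into @net, falling back to a
 * "dev%d" style name if its current name is already taken there (the
 * same pattern default_device_exit() below uses).
 */
static int example_move(struct net_device *dev, struct net *net)
{
	int err;

	ASSERT_RTNL();			/* caller must hold rtnl */
	err = dev_change_net_namespace(dev, net, "dev%d");
	if (err)
		printk(KERN_WARNING "%s: move of %s failed: %d\n",
		       __func__, dev->name, err);
	return err;
}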
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517static int dev_cpu_callback(struct notifier_block *nfb,
4518 unsigned long action,
4519 void *ocpu)
4520{
4521 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07004522 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523 struct sk_buff *skb;
4524 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4525 struct softnet_data *sd, *oldsd;
4526
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07004527 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528 return NOTIFY_OK;
4529
4530 local_irq_disable();
4531 cpu = smp_processor_id();
4532 sd = &per_cpu(softnet_data, cpu);
4533 oldsd = &per_cpu(softnet_data, oldcpu);
4534
4535 /* Find end of our completion_queue. */
4536 list_skb = &sd->completion_queue;
4537 while (*list_skb)
4538 list_skb = &(*list_skb)->next;
4539 /* Append completion queue from offline CPU. */
4540 *list_skb = oldsd->completion_queue;
4541 oldsd->completion_queue = NULL;
4542
4543 /* Find end of our output_queue. */
4544 list_net = &sd->output_queue;
4545 while (*list_net)
4546 list_net = &(*list_net)->next_sched;
4547 /* Append output queue from offline CPU. */
4548 *list_net = oldsd->output_queue;
4549 oldsd->output_queue = NULL;
4550
4551 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4552 local_irq_enable();
4553
4554 /* Process offline CPU's input_pkt_queue */
4555 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4556 netif_rx(skb);
4557
4558 return NOTIFY_OK;
4559}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004560
Chris Leechdb217332006-06-17 21:24:58 -07004561#ifdef CONFIG_NET_DMA
4562/**
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004563 * net_dma_rebalance - try to maintain one DMA channel per CPU
4564 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4565 *
4566 * This is called when the number of channels allocated to the net_dma client
4567 * changes. The net_dma client tries to have one DMA channel per CPU.
Chris Leechdb217332006-06-17 21:24:58 -07004568 */
Dan Williamsd379b012007-07-09 11:56:42 -07004569
4570static void net_dma_rebalance(struct net_dma *net_dma)
Chris Leechdb217332006-06-17 21:24:58 -07004571{
Dan Williamsd379b012007-07-09 11:56:42 -07004572 unsigned int cpu, i, n, chan_idx;
Chris Leechdb217332006-06-17 21:24:58 -07004573 struct dma_chan *chan;
4574
Dan Williamsd379b012007-07-09 11:56:42 -07004575 if (cpus_empty(net_dma->channel_mask)) {
Chris Leechdb217332006-06-17 21:24:58 -07004576 for_each_online_cpu(cpu)
Alexey Dobriyan29bbd722006-08-02 15:02:31 -07004577 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
Chris Leechdb217332006-06-17 21:24:58 -07004578 return;
4579 }
4580
4581 i = 0;
4582 cpu = first_cpu(cpu_online_map);
4583
Mike Travis0e12f842008-05-12 21:21:13 +02004584 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
Dan Williamsd379b012007-07-09 11:56:42 -07004585 chan = net_dma->channels[chan_idx];
4586
4587 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4588 + (i < (num_online_cpus() %
4589 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
Chris Leechdb217332006-06-17 21:24:58 -07004590
 4591		while (n) {
Alexey Dobriyan29bbd722006-08-02 15:02:31 -07004592 per_cpu(softnet_data, cpu).net_dma = chan;
Chris Leechdb217332006-06-17 21:24:58 -07004593 cpu = next_cpu(cpu, cpu_online_map);
4594 n--;
4595 }
4596 i++;
4597 }
Chris Leechdb217332006-06-17 21:24:58 -07004598}
4599
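/*
 * Worked example of the split computed above (illustrative numbers):
 * with num_online_cpus() == 8 and cpus_weight(channel_mask) == 3 the
 * quotient is 2 and the remainder 8 % 3 == 2, so i == 0 and i == 1 get
 * n = 3 while i == 2 gets n = 2; every channel serves the quotient's
 * worth of CPUs and the first 'remainder' channels serve one extra.
 */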
4600/**
4601 * netdev_dma_event - event callback for the net_dma_client
4602 * @client: should always be net_dma_client
Randy Dunlapf4b8ea72006-06-22 16:00:11 -07004603 * @chan: DMA channel for the event
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004604 * @state: DMA state to be handled
Chris Leechdb217332006-06-17 21:24:58 -07004605 */
Dan Williamsd379b012007-07-09 11:56:42 -07004606static enum dma_state_client
4607netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4608 enum dma_state state)
Chris Leechdb217332006-06-17 21:24:58 -07004609{
Dan Williamsd379b012007-07-09 11:56:42 -07004610 int i, found = 0, pos = -1;
4611 struct net_dma *net_dma =
4612 container_of(client, struct net_dma, client);
4613 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4614
4615 spin_lock(&net_dma->lock);
4616 switch (state) {
4617 case DMA_RESOURCE_AVAILABLE:
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004618 for (i = 0; i < nr_cpu_ids; i++)
Dan Williamsd379b012007-07-09 11:56:42 -07004619 if (net_dma->channels[i] == chan) {
4620 found = 1;
4621 break;
4622 } else if (net_dma->channels[i] == NULL && pos < 0)
4623 pos = i;
4624
4625 if (!found && pos >= 0) {
4626 ack = DMA_ACK;
4627 net_dma->channels[pos] = chan;
4628 cpu_set(pos, net_dma->channel_mask);
4629 net_dma_rebalance(net_dma);
4630 }
Chris Leechdb217332006-06-17 21:24:58 -07004631 break;
4632 case DMA_RESOURCE_REMOVED:
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004633 for (i = 0; i < nr_cpu_ids; i++)
Dan Williamsd379b012007-07-09 11:56:42 -07004634 if (net_dma->channels[i] == chan) {
4635 found = 1;
4636 pos = i;
4637 break;
4638 }
4639
4640 if (found) {
4641 ack = DMA_ACK;
4642 cpu_clear(pos, net_dma->channel_mask);
4643 net_dma->channels[i] = NULL;
4644 net_dma_rebalance(net_dma);
4645 }
Chris Leechdb217332006-06-17 21:24:58 -07004646 break;
4647 default:
4648 break;
4649 }
Dan Williamsd379b012007-07-09 11:56:42 -07004650 spin_unlock(&net_dma->lock);
4651
4652 return ack;
Chris Leechdb217332006-06-17 21:24:58 -07004653}
4654
4655/**
 4656 * netdev_dma_register - register the networking subsystem as a DMA client
4657 */
4658static int __init netdev_dma_register(void)
4659{
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004660	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *),
4661 GFP_KERNEL);
4662 if (unlikely(!net_dma.channels)) {
4663 printk(KERN_NOTICE
4664 "netdev_dma: no memory for net_dma.channels\n");
4665 return -ENOMEM;
4666 }
Dan Williamsd379b012007-07-09 11:56:42 -07004667 spin_lock_init(&net_dma.lock);
4668 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4669 dma_async_client_register(&net_dma.client);
4670 dma_async_client_chan_request(&net_dma.client);
Chris Leechdb217332006-06-17 21:24:58 -07004671 return 0;
4672}
4673
4674#else
4675static int __init netdev_dma_register(void) { return -ENODEV; }
4676#endif /* CONFIG_NET_DMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004677
Herbert Xu7f353bf2007-08-10 15:47:58 -07004678/**
 4679 * netdev_compute_features - compute conjunction of two feature sets
4680 * @all: first feature set
4681 * @one: second feature set
4682 *
4683 * Computes a new feature set after adding a device with feature set
4684 * @one to the master device with current feature set @all. Returns
4685 * the new feature set.
4686 */
4687int netdev_compute_features(unsigned long all, unsigned long one)
4688{
4689 /* if device needs checksumming, downgrade to hw checksumming */
4690 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4691 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4692
4693 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4694 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4695 all ^= NETIF_F_HW_CSUM
4696 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4697
4698 if (one & NETIF_F_GSO)
4699 one |= NETIF_F_GSO_SOFTWARE;
4700 one |= NETIF_F_GSO;
4701
Herbert Xue2a6b852008-09-08 16:10:02 -07004702 /*
4703 * If even one device supports a GSO protocol with software fallback,
4704 * enable it for all.
4705 */
4706 all |= one & NETIF_F_GSO_SOFTWARE;
4707
Herbert Xu7f353bf2007-08-10 15:47:58 -07004708 /* If even one device supports robust GSO, enable it for all. */
4709 if (one & NETIF_F_GSO_ROBUST)
4710 all |= NETIF_F_GSO_ROBUST;
4711
4712 all &= one | NETIF_F_LLTX;
4713
4714 if (!(all & NETIF_F_ALL_CSUM))
4715 all &= ~NETIF_F_SG;
4716 if (!(all & NETIF_F_SG))
4717 all &= ~NETIF_F_GSO_MASK;
4718
4719 return all;
4720}
4721EXPORT_SYMBOL(netdev_compute_features);
4722
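/*
 * Hedged sketch of the intended caller, modelled on how a master
 * driver such as bonding folds slave features together; the helper
 * name and the starting mask are illustrative, not from this file.
 */
static unsigned long example_master_features(struct net_device *slaves[],
					     int n)
{
	unsigned long all = NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_GSO;
	int i;

	for (i = 0; i < n; i++)
		all = netdev_compute_features(all, slaves[i]->features);
	return all;
}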
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004723static struct hlist_head *netdev_create_hash(void)
4724{
4725 int i;
4726 struct hlist_head *hash;
4727
4728 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4729 if (hash != NULL)
4730 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4731 INIT_HLIST_HEAD(&hash[i]);
4732
4733 return hash;
4734}
4735
Eric W. Biederman881d9662007-09-17 11:56:21 -07004736/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07004737static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004738{
Eric W. Biederman881d9662007-09-17 11:56:21 -07004739 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07004740
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004741 net->dev_name_head = netdev_create_hash();
4742 if (net->dev_name_head == NULL)
4743 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004744
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004745 net->dev_index_head = netdev_create_hash();
4746 if (net->dev_index_head == NULL)
4747 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004748
4749 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004750
4751err_idx:
4752 kfree(net->dev_name_head);
4753err_name:
4754 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004755}
4756
Arjan van de Ven6579e572008-07-21 13:31:48 -07004757char *netdev_drivername(struct net_device *dev, char *buffer, int len)
4758{
4759 struct device_driver *driver;
4760 struct device *parent;
4761
4762 if (len <= 0 || !buffer)
4763 return buffer;
4764 buffer[0] = 0;
4765
4766 parent = dev->dev.parent;
4767
4768 if (!parent)
4769 return buffer;
4770
4771 driver = parent->driver;
4772 if (driver && driver->name)
4773 strlcpy(buffer, driver->name, len);
4774 return buffer;
4775}
4776
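/*
 * Sketch of the intended use (the tx watchdog prints a line much like
 * this); example_report is a hypothetical caller and the buffer sizing
 * is the caller's responsibility.
 */
static void example_report(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_INFO "%s (%s): transmit check\n", dev->name,
	       netdev_drivername(dev, drivername, sizeof(drivername)));
}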
Pavel Emelyanov46650792007-10-08 20:38:39 -07004777static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004778{
4779 kfree(net->dev_name_head);
4780 kfree(net->dev_index_head);
4781}
4782
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004783static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004784 .init = netdev_init,
4785 .exit = netdev_exit,
4786};
4787
Pavel Emelyanov46650792007-10-08 20:38:39 -07004788static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02004789{
4790 struct net_device *dev, *next;
4791 /*
 4792	 * Push all migratable network devices back to the
4793 * initial network namespace
4794 */
4795 rtnl_lock();
4796 for_each_netdev_safe(net, dev, next) {
4797 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004798 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02004799
 4800		/* Ignore unmovable devices (e.g. loopback) */
4801 if (dev->features & NETIF_F_NETNS_LOCAL)
4802 continue;
4803
 4804		/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004805 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
4806 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004807 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004808 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02004809 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004810 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02004811 }
4812 }
4813 rtnl_unlock();
4814}
4815
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004816static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02004817 .exit = default_device_exit,
4818};
4819
Linus Torvalds1da177e2005-04-16 15:20:36 -07004820/*
4821 * Initialize the DEV module. At boot time this walks the device list and
4822 * unhooks any devices that fail to initialise (normally hardware not
4823 * present) and leaves us with a valid list of present and active devices.
4824 *
4825 */
4826
4827/*
4828 * This is called single threaded during boot, so no need
4829 * to take the rtnl semaphore.
4830 */
4831static int __init net_dev_init(void)
4832{
4833 int i, rc = -ENOMEM;
4834
4835 BUG_ON(!dev_boot_phase);
4836
Linus Torvalds1da177e2005-04-16 15:20:36 -07004837 if (dev_proc_init())
4838 goto out;
4839
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004840 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07004841 goto out;
4842
4843 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004844 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004845 INIT_LIST_HEAD(&ptype_base[i]);
4846
Eric W. Biederman881d9662007-09-17 11:56:21 -07004847 if (register_pernet_subsys(&netdev_net_ops))
4848 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004849
Eric W. Biedermance286d32007-09-12 13:53:49 +02004850 if (register_pernet_device(&default_device_ops))
4851 goto out;
4852
Linus Torvalds1da177e2005-04-16 15:20:36 -07004853 /*
4854 * Initialise the packet receive queues.
4855 */
4856
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07004857 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004858 struct softnet_data *queue;
4859
4860 queue = &per_cpu(softnet_data, i);
4861 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004862 queue->completion_queue = NULL;
4863 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004864
4865 queue->backlog.poll = process_backlog;
4866 queue->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004867 }
4868
Chris Leechdb217332006-06-17 21:24:58 -07004869 netdev_dma_register();
4870
Linus Torvalds1da177e2005-04-16 15:20:36 -07004871 dev_boot_phase = 0;
4872
Carlos R. Mafra962cf362008-05-15 11:15:37 -03004873 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
4874 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004875
4876 hotcpu_notifier(dev_cpu_callback, 0);
4877 dst_init();
4878 dev_mcast_init();
4879 rc = 0;
4880out:
4881 return rc;
4882}
4883
4884subsys_initcall(net_dev_init);
4885
4886EXPORT_SYMBOL(__dev_get_by_index);
4887EXPORT_SYMBOL(__dev_get_by_name);
4888EXPORT_SYMBOL(__dev_remove_pack);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08004889EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004890EXPORT_SYMBOL(dev_add_pack);
4891EXPORT_SYMBOL(dev_alloc_name);
4892EXPORT_SYMBOL(dev_close);
4893EXPORT_SYMBOL(dev_get_by_flags);
4894EXPORT_SYMBOL(dev_get_by_index);
4895EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004896EXPORT_SYMBOL(dev_open);
4897EXPORT_SYMBOL(dev_queue_xmit);
4898EXPORT_SYMBOL(dev_remove_pack);
4899EXPORT_SYMBOL(dev_set_allmulti);
4900EXPORT_SYMBOL(dev_set_promiscuity);
4901EXPORT_SYMBOL(dev_change_flags);
4902EXPORT_SYMBOL(dev_set_mtu);
4903EXPORT_SYMBOL(dev_set_mac_address);
4904EXPORT_SYMBOL(free_netdev);
4905EXPORT_SYMBOL(netdev_boot_setup_check);
4906EXPORT_SYMBOL(netdev_set_master);
4907EXPORT_SYMBOL(netdev_state_change);
4908EXPORT_SYMBOL(netif_receive_skb);
4909EXPORT_SYMBOL(netif_rx);
4910EXPORT_SYMBOL(register_gifconf);
4911EXPORT_SYMBOL(register_netdevice);
4912EXPORT_SYMBOL(register_netdevice_notifier);
4913EXPORT_SYMBOL(skb_checksum_help);
4914EXPORT_SYMBOL(synchronize_net);
4915EXPORT_SYMBOL(unregister_netdevice);
4916EXPORT_SYMBOL(unregister_netdevice_notifier);
4917EXPORT_SYMBOL(net_enable_timestamp);
4918EXPORT_SYMBOL(net_disable_timestamp);
4919EXPORT_SYMBOL(dev_get_flags);
4920
4921#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4922EXPORT_SYMBOL(br_handle_frame_hook);
4923EXPORT_SYMBOL(br_fdb_get_hook);
4924EXPORT_SYMBOL(br_fdb_put_hook);
4925#endif
4926
4927#ifdef CONFIG_KMOD
4928EXPORT_SYMBOL(dev_load);
4929#endif
4930
4931EXPORT_PER_CPU_SYMBOL(softnet_data);