/*
 *	NET3	Protocol independent device support routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *	D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *				to 2 if register_netdev gets called
 *				before net_dev_init & also removed a
 *				few lines of code in the process.
 *	Alan Cox	:	device private ioctl copies fields back.
 *	Alan Cox	:	Transmit queue code does relevant
 *				stunts to keep the queue safe.
 *	Alan Cox	:	Fixed double lock.
 *	Alan Cox	:	Fixed promisc NULL pointer trap
 *	????????	:	Support the full private ioctl range
 *	Alan Cox	:	Moved ioctl permission check into
 *				drivers
 *	Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *	Alan Cox	:	100 backlog just doesn't cut it when
 *				you start doing multicast video 8)
 *	Alan Cox	:	Rewrote net_bh and list manager.
 *	Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *	Alan Cox	:	Took out transmit every packet pass
 *				Saved a few bytes in the ioctl handler
 *	Alan Cox	:	Network driver sets packet type before
 *				calling netif_rx. Saves a function
 *				call a packet.
 *	Alan Cox	:	Hashed net_bh()
 *	Richard Kooijman:	Timestamp fixes.
 *	Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *	Alan Cox	:	Device lock protection.
 *	Alan Cox	:	Fixed nasty side effect of device close
 *				changes.
 *	Rudi Cilibrasi	:	Pass the right thing to
 *				set_mac_address()
 *	Dave Miller	:	32bit quantity for the device lock to
 *				make it work out on a Sparc.
 *	Bjorn Ekwall	:	Added KERNELD hack.
 *	Alan Cox	:	Cleaned up the backlog initialise.
 *	Craig Metz	:	SIOCGIFCONF fix if space for under
 *				1 device.
 *	Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *				is no device open function.
 *	Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *	Cyrus Durgin	:	Cleaned for KMOD
 *	Adam Sulmicki	:	Bug Fix : Network Device Unload
 *				A network device unload needs to purge
 *				the backlog queue.
 *	Paul Rusty Russell :	SIOCSIFNAME
 *	Pekka Riikonen	:	Netdev boot-time settings code
 *	Andrew Morton	:	Make unregister_netdevice wait
 *				indefinitely on dev->refcnt
 *	J Hadi Salim	:	- Backlog queue sampling
 *				- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE: That is no longer true with the addition of VLAN tags. Not
 *	sure which should go first, but I bet it won't make much
 *	difference if we are running VLANs. The good news is that
 *	this protocol won't be in the list unless compiled in, so
 *	the average user (w/out VLANs) will not be adversely affected.
 *	--BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client client;
	spinlock_t lock;
	cpumask_t channel_mask;
	struct dma_chan **channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
		 enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates. This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);
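
/*
 * Illustrative read-side sketch of the locking rules above (a minimal
 * example; "do_something_readonly" is a hypothetical helper, not defined
 * in this file): a pure reader takes dev_base_lock around its walk of the
 * device list, e.g.
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		do_something_readonly(dev);
 *	read_unlock(&dev_base_lock);
 *
 * Writers instead hold the RTNL semaphore and take dev_base_lock for
 * writing only around the actual update, as list_netdevice() and
 * unlist_netdevice() below do.
 */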

#define NETDEV_HASHBITS 8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *	--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
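
/*
 * Illustrative sketch of how a protocol would register itself with
 * dev_add_pack(); "my_packet_type" and "my_rcv" are hypothetical names,
 * not handlers defined in this file:
 *
 *	static struct packet_type my_packet_type = {
 *		.type	= htons(ETH_P_IP),
 *		.func	= my_rcv,
 *	};
 *	dev_add_pack(&my_packet_type);
 *
 * The structure must stay allocated until dev_remove_pack() has been
 * called for it (see below).
 */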

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list. The function
 *	returns 0 on error and 1 on success. This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
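
/*
 * Given the parsing in netdev_boot_setup() above, the boot parameter has
 * the form
 *
 *	netdev=<irq>,<base_addr>,<mem_start>,<mem_end>,<name>
 *
 * so, for example, "netdev=9,0x300,0,0,eth1" (an illustrative value, not a
 * recommendation) records IRQ 9 and I/O base 0x300 for the device that
 * later registers as "eth1" and calls netdev_boot_setup_check().
 */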

/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
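
/*
 * Illustrative caller pattern for the refcounted lookup above (error
 * handling elided, "eth0" is only an example name):
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */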

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
David S. Millerc7fa9d12006-08-15 16:34:13 -0700771 * to allow sysfs to work. We also disallow any kind of
772 * whitespace.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773 */
Mitch Williamsc2373ee2005-11-09 10:34:45 -0800774int dev_valid_name(const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775{
David S. Millerc7fa9d12006-08-15 16:34:13 -0700776 if (*name == '\0')
777 return 0;
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -0700778 if (strlen(name) >= IFNAMSIZ)
779 return 0;
David S. Millerc7fa9d12006-08-15 16:34:13 -0700780 if (!strcmp(name, ".") || !strcmp(name, ".."))
781 return 0;
782
783 while (*name) {
784 if (*name == '/' || isspace(*name))
785 return 0;
786 name++;
787 }
788 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789}
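
/*
 * For illustration, given the checks above: names such as "eth0" or "ppp%d"
 * are accepted, while "", ".", "..", names of IFNAMSIZ or more characters,
 * and anything containing '/' or whitespace are rejected.
 */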

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}


/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
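
/*
 * Illustrative caller sketch: dev_open() and dev_close() both assert that
 * the RTNL semaphore is held, so a typical out-of-line caller looks like
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 *
 * (error handling elided; dev_close() follows the same pattern).
 */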

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
1246 * kernel structures and may then be reused. A negative errno code
1247 * is returned on a failure.
1248 */
1249
1250int unregister_netdevice_notifier(struct notifier_block *nb)
1251{
Herbert Xu9f514952006-03-25 01:24:25 -08001252 int err;
1253
1254 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001255 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001256 rtnl_unlock();
1257 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258}
1259
1260/**
1261 * call_netdevice_notifiers - call all network notifier blocks
1262 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001263 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 *
1265 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001266 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 */
1268
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001269int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001271 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272}
1273
1274/* When > 0 there are consumers of rx skb time stamps */
1275static atomic_t netstamp_needed = ATOMIC_INIT(0);
1276
1277void net_enable_timestamp(void)
1278{
1279 atomic_inc(&netstamp_needed);
1280}
1281
1282void net_disable_timestamp(void)
1283{
1284 atomic_dec(&netstamp_needed);
1285}
1286
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001287static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288{
1289 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001290 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001291 else
1292 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293}
1294
1295/*
1296 * Support routine. Sends outgoing frames to any network
1297 * taps currently in use.
1298 */
1299
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001300static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301{
1302 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001303
1304 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305
1306 rcu_read_lock();
1307 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1308 /* Never send packets back to the socket
1309 * they originated from - MvS (miquels@drinkel.ow.org)
1310 */
1311 if ((ptype->dev == dev || !ptype->dev) &&
1312 (ptype->af_packet_priv == NULL ||
1313 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1314 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
1315 if (!skb2)
1316 break;
1317
1318 /* skb->nh should be correctly
1319 set by sender, so that the second statement is
1320 just protection against buggy protocols.
1321 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001322 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001324 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001325 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 if (net_ratelimit())
1327 printk(KERN_CRIT "protocol %04x is "
1328 "buggy, dev %s\n",
1329 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001330 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331 }
1332
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001333 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001335 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 }
1337 }
1338 rcu_read_unlock();
1339}
1340
Denis Vlasenko56079432006-03-29 15:57:29 -08001341
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001342static inline void __netif_reschedule(struct Qdisc *q)
1343{
1344 struct softnet_data *sd;
1345 unsigned long flags;
1346
1347 local_irq_save(flags);
1348 sd = &__get_cpu_var(softnet_data);
1349 q->next_sched = sd->output_queue;
1350 sd->output_queue = q;
1351 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1352 local_irq_restore(flags);
1353}
1354
David S. Miller37437bb2008-07-16 02:15:04 -07001355void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001356{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001357 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1358 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001359}
1360EXPORT_SYMBOL(__netif_schedule);
1361
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001362void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001363{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001364 if (atomic_dec_and_test(&skb->users)) {
1365 struct softnet_data *sd;
1366 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001367
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001368 local_irq_save(flags);
1369 sd = &__get_cpu_var(softnet_data);
1370 skb->next = sd->completion_queue;
1371 sd->completion_queue = skb;
1372 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1373 local_irq_restore(flags);
1374 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001375}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001376EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001377
1378void dev_kfree_skb_any(struct sk_buff *skb)
1379{
1380 if (in_irq() || irqs_disabled())
1381 dev_kfree_skb_irq(skb);
1382 else
1383 dev_kfree_skb(skb);
1384}
1385EXPORT_SYMBOL(dev_kfree_skb_any);
1386
1387
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001388/**
1389 * netif_device_detach - mark device as removed
1390 * @dev: network device
1391 *
1392 * Mark device as removed from system and therefore no longer available.
1393 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001394void netif_device_detach(struct net_device *dev)
1395{
1396 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1397 netif_running(dev)) {
1398 netif_stop_queue(dev);
1399 }
1400}
1401EXPORT_SYMBOL(netif_device_detach);
1402
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001403/**
1404 * netif_device_attach - mark device as attached
1405 * @dev: network device
1406 *
1407 * Mark device as attached from system and restart if needed.
1408 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001409void netif_device_attach(struct net_device *dev)
1410{
1411 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1412 netif_running(dev)) {
1413 netif_wake_queue(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001414 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001415 }
1416}
1417EXPORT_SYMBOL(netif_device_attach);
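/*
 * Editorial sketch (not from the original source): the usual pairing of
 * the two helpers above in a PCI driver's power management callbacks.
 * The my_* names are hypothetical.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		...stop DMA, save state, power the device down...
 *		return 0;
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		...power up, restore state...
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 *
 * Detach clears __LINK_STATE_PRESENT so netif_device_present() fails and
 * the stack stops handing packets to the driver; attach restores the bit
 * and wakes the queue and the watchdog if the interface was running.
 */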
1418
Ben Hutchings6de329e2008-06-16 17:02:28 -07001419static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1420{
1421 return ((features & NETIF_F_GEN_CSUM) ||
1422 ((features & NETIF_F_IP_CSUM) &&
1423 protocol == htons(ETH_P_IP)) ||
1424 ((features & NETIF_F_IPV6_CSUM) &&
1425 protocol == htons(ETH_P_IPV6)));
1426}
1427
1428static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1429{
1430 if (can_checksum_protocol(dev->features, skb->protocol))
1431 return true;
1432
1433 if (skb->protocol == htons(ETH_P_8021Q)) {
1434 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1435 if (can_checksum_protocol(dev->features & dev->vlan_features,
1436 veh->h_vlan_encapsulated_proto))
1437 return true;
1438 }
1439
1440 return false;
1441}
Denis Vlasenko56079432006-03-29 15:57:29 -08001442
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443/*
1444 * Invalidate hardware checksum when packet is to be mangled, and
1445 * complete checksum manually on outgoing path.
1446 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001447int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448{
Al Virod3bc23e2006-11-14 21:24:49 -08001449 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001450 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451
Patrick McHardy84fa7932006-08-29 16:44:56 -07001452 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001453 goto out_set_summed;
1454
1455 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001456 /* Let GSO fix up the checksum. */
1457 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 }
1459
Herbert Xua0308472007-10-15 01:47:15 -07001460 offset = skb->csum_start - skb_headroom(skb);
1461 BUG_ON(offset >= skb_headlen(skb));
1462 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1463
1464 offset += skb->csum_offset;
1465 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1466
1467 if (skb_cloned(skb) &&
1468 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1470 if (ret)
1471 goto out;
1472 }
1473
Herbert Xua0308472007-10-15 01:47:15 -07001474 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001475out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001477out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 return ret;
1479}
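/*
 * Editorial note (not from the original source): for a CHECKSUM_PARTIAL
 * skb the protocol layer records where the device (or this software
 * fallback) must finish the checksum.  A worked example for TCP over
 * IPv4 with a 14-byte Ethernet header and a 20-byte IP header, assuming
 * the usual field layout:
 *
 *	skb->csum_start  = offset of the TCP header from skb->head
 *	                   (headroom + 14 + 20)
 *	skb->csum_offset = offsetof(struct tcphdr, check) = 16
 *
 * so the code above checksums from csum_start to the end of the packet
 * and folds the result into the 16-bit field at csum_start + 16.
 */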
1480
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001481/**
1482 * skb_gso_segment - Perform segmentation on skb.
1483 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001484 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001485 *
1486 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001487 *
1488 * It may return NULL if the skb requires no segmentation. This is
1489 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001490 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001491struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001492{
1493 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1494 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001495 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001496 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001497
1498 BUG_ON(skb_shinfo(skb)->frag_list);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001499
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001500 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001501 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001502 __skb_pull(skb, skb->mac_len);
1503
Herbert Xuf9d106a2007-04-23 22:36:13 -07001504 if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001505 if (skb_header_cloned(skb) &&
1506 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1507 return ERR_PTR(err);
1508 }
1509
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001510 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001511 list_for_each_entry_rcu(ptype,
1512 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001513 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001514 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001515 err = ptype->gso_send_check(skb);
1516 segs = ERR_PTR(err);
1517 if (err || skb_gso_ok(skb, features))
1518 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001519 __skb_push(skb, (skb->data -
1520 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001521 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001522 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001523 break;
1524 }
1525 }
1526 rcu_read_unlock();
1527
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001528 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001529
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001530 return segs;
1531}
1532
1533EXPORT_SYMBOL(skb_gso_segment);
1534
Herbert Xufb286bb2005-11-10 13:01:24 -08001535/* Take action when hardware reception checksum errors are detected. */
1536#ifdef CONFIG_BUG
1537void netdev_rx_csum_fault(struct net_device *dev)
1538{
1539 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001540 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001541 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001542 dump_stack();
1543 }
1544}
1545EXPORT_SYMBOL(netdev_rx_csum_fault);
1546#endif
1547
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548/* Actually, we should eliminate this check as soon as we know that:
1549 * 1. IOMMU is present and allows mapping all the memory.
1550 * 2. No high memory really exists on this machine.
1551 */
1552
1553static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1554{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001555#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 int i;
1557
1558 if (dev->features & NETIF_F_HIGHDMA)
1559 return 0;
1560
1561 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1562 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1563 return 1;
1564
Herbert Xu3d3a8532006-06-27 13:33:10 -07001565#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 return 0;
1567}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001569struct dev_gso_cb {
1570 void (*destructor)(struct sk_buff *skb);
1571};
1572
1573#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1574
1575static void dev_gso_skb_destructor(struct sk_buff *skb)
1576{
1577 struct dev_gso_cb *cb;
1578
1579 do {
1580 struct sk_buff *nskb = skb->next;
1581
1582 skb->next = nskb->next;
1583 nskb->next = NULL;
1584 kfree_skb(nskb);
1585 } while (skb->next);
1586
1587 cb = DEV_GSO_CB(skb);
1588 if (cb->destructor)
1589 cb->destructor(skb);
1590}
1591
1592/**
1593 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1594 * @skb: buffer to segment
1595 *
1596 * This function segments the given skb and stores the list of segments
1597 * in skb->next.
1598 */
1599static int dev_gso_segment(struct sk_buff *skb)
1600{
1601 struct net_device *dev = skb->dev;
1602 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001603 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1604 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001605
Herbert Xu576a30e2006-06-27 13:22:38 -07001606 segs = skb_gso_segment(skb, features);
1607
1608 /* Verifying header integrity only. */
1609 if (!segs)
1610 return 0;
1611
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001612 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001613 return PTR_ERR(segs);
1614
1615 skb->next = segs;
1616 DEV_GSO_CB(skb)->destructor = skb->destructor;
1617 skb->destructor = dev_gso_skb_destructor;
1618
1619 return 0;
1620}
1621
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001622int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1623 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001624{
1625 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001626 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001627 dev_queue_xmit_nit(skb, dev);
1628
Herbert Xu576a30e2006-06-27 13:22:38 -07001629 if (netif_needs_gso(dev, skb)) {
1630 if (unlikely(dev_gso_segment(skb)))
1631 goto out_kfree_skb;
1632 if (skb->next)
1633 goto gso;
1634 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001635
Herbert Xu576a30e2006-06-27 13:22:38 -07001636 return dev->hard_start_xmit(skb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001637 }
1638
Herbert Xu576a30e2006-06-27 13:22:38 -07001639gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001640 do {
1641 struct sk_buff *nskb = skb->next;
1642 int rc;
1643
1644 skb->next = nskb->next;
1645 nskb->next = NULL;
1646 rc = dev->hard_start_xmit(nskb, dev);
1647 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001648 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001649 skb->next = nskb;
1650 return rc;
1651 }
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001652 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001653 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001654 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001655
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001656 skb->destructor = DEV_GSO_CB(skb)->destructor;
1657
1658out_kfree_skb:
1659 kfree_skb(skb);
1660 return 0;
1661}
1662
David S. Millerb6b2fed2008-07-21 09:48:06 -07001663static u32 simple_tx_hashrnd;
1664static int simple_tx_hashrnd_initialized = 0;
1665
David S. Miller8f0f2222008-07-15 03:47:03 -07001666static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
1667{
David S. Millerb6b2fed2008-07-21 09:48:06 -07001668 u32 addr1, addr2, ports;
1669 u32 hash, ihl;
David S. Miller8f0f2222008-07-15 03:47:03 -07001670 u8 ip_proto;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001671
1672 if (unlikely(!simple_tx_hashrnd_initialized)) {
1673 get_random_bytes(&simple_tx_hashrnd, 4);
1674 simple_tx_hashrnd_initialized = 1;
1675 }
David S. Miller8f0f2222008-07-15 03:47:03 -07001676
1677 switch (skb->protocol) {
1678 case __constant_htons(ETH_P_IP):
1679 ip_proto = ip_hdr(skb)->protocol;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001680 addr1 = ip_hdr(skb)->saddr;
1681 addr2 = ip_hdr(skb)->daddr;
David S. Miller8f0f2222008-07-15 03:47:03 -07001682 ihl = ip_hdr(skb)->ihl;
David S. Miller8f0f2222008-07-15 03:47:03 -07001683 break;
1684 case __constant_htons(ETH_P_IPV6):
1685 ip_proto = ipv6_hdr(skb)->nexthdr;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001686 addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
1687 addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
David S. Miller8f0f2222008-07-15 03:47:03 -07001688 ihl = (40 >> 2);
David S. Miller8f0f2222008-07-15 03:47:03 -07001689 break;
1690 default:
1691 return 0;
1692 }
1693
David S. Miller8f0f2222008-07-15 03:47:03 -07001694
1695 switch (ip_proto) {
1696 case IPPROTO_TCP:
1697 case IPPROTO_UDP:
1698 case IPPROTO_DCCP:
1699 case IPPROTO_ESP:
1700 case IPPROTO_AH:
1701 case IPPROTO_SCTP:
1702 case IPPROTO_UDPLITE:
David S. Millerb6b2fed2008-07-21 09:48:06 -07001703 ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
David S. Miller8f0f2222008-07-15 03:47:03 -07001704 break;
1705
1706 default:
David S. Millerb6b2fed2008-07-21 09:48:06 -07001707 ports = 0;
David S. Miller8f0f2222008-07-15 03:47:03 -07001708 break;
1709 }
1710
David S. Millerb6b2fed2008-07-21 09:48:06 -07001711 hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
1712
1713 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001714}
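/*
 * Editorial note (not from the original source): the final line above is
 * a multiply-and-shift that maps the 32-bit jhash value uniformly onto
 * the range [0, real_num_tx_queues) without a modulo.  For example, with
 * 4 TX queues a hash of 0x80000000 lands in queue 2:
 *
 *	((u64)0x80000000 * 4) >> 32 == 2
 *
 * Flows are hashed on (saddr, daddr, first 32 bits after the IP header),
 * i.e. the port pair for TCP/UDP, so packets of one flow keep hitting
 * the same queue.
 */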
1715
David S. Millere8a04642008-07-17 00:34:19 -07001716static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1717 struct sk_buff *skb)
1718{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001719 u16 queue_index = 0;
1720
David S. Millereae792b2008-07-15 03:03:33 -07001721 if (dev->select_queue)
1722 queue_index = dev->select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001723 else if (dev->real_num_tx_queues > 1)
1724 queue_index = simple_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001725
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001726 skb_set_queue_mapping(skb, queue_index);
1727 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001728}
1729
Dave Jonesd29f7492008-07-22 14:09:06 -07001730/**
1731 * dev_queue_xmit - transmit a buffer
1732 * @skb: buffer to transmit
1733 *
1734 * Queue a buffer for transmission to a network device. The caller must
1735 * have set the device and priority and built the buffer before calling
1736 * this function. The function can be called from an interrupt.
1737 *
1738 * A negative errno code is returned on a failure. A success does not
1739 * guarantee the frame will be transmitted as it may be dropped due
1740 * to congestion or traffic shaping.
1741 *
1742 * -----------------------------------------------------------------------------------
1743 * I notice this method can also return errors from the queue disciplines,
1744 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1745 * be positive.
1746 *
1747 * Regardless of the return value, the skb is consumed, so it is currently
1748 * difficult to retry a send to this method. (You can bump the ref count
1749 * before sending to hold a reference for retry if you are careful.)
1750 *
1751 * When calling this method, interrupts MUST be enabled. This is because
1752 * the BH enable code must have IRQs enabled so that it will not deadlock.
1753 * --BLG
1754 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755int dev_queue_xmit(struct sk_buff *skb)
1756{
1757 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001758 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 struct Qdisc *q;
1760 int rc = -ENOMEM;
1761
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001762 /* GSO will handle the following emulations directly. */
1763 if (netif_needs_gso(dev, skb))
1764 goto gso;
1765
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 if (skb_shinfo(skb)->frag_list &&
1767 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001768 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 goto out_kfree_skb;
1770
1771 /* Fragmented skb is linearized if the device does not support SG,
1772 * or if at least one of the fragments is in highmem and the device
1773 * does not support DMA from it.
1774 */
1775 if (skb_shinfo(skb)->nr_frags &&
1776 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001777 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 goto out_kfree_skb;
1779
1780 /* If packet is not checksummed and device does not support
1781 * checksumming for this protocol, complete checksumming here.
1782 */
Herbert Xu663ead32007-04-09 11:59:07 -07001783 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1784 skb_set_transport_header(skb, skb->csum_start -
1785 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001786 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1787 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001788 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001790gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001791 /* Disable soft irqs for various locks below. Also
1792 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001794 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795
David S. Millereae792b2008-07-15 03:03:33 -07001796 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001797 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001798
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799#ifdef CONFIG_NET_CLS_ACT
1800 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1801#endif
1802 if (q->enqueue) {
David S. Miller5fb66222008-08-02 20:02:43 -07001803 spinlock_t *root_lock = qdisc_lock(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804
David S. Miller37437bb2008-07-16 02:15:04 -07001805 spin_lock(root_lock);
1806
David S. Millera9312ae2008-08-17 21:51:03 -07001807 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
David S. Miller96d20312008-08-17 23:37:16 -07001808 kfree_skb(skb);
David S. Millera9312ae2008-08-17 21:51:03 -07001809 rc = NET_XMIT_DROP;
David S. Miller96d20312008-08-17 23:37:16 -07001810 } else {
1811 rc = qdisc_enqueue_root(skb, q);
1812 qdisc_run(q);
David S. Millera9312ae2008-08-17 21:51:03 -07001813 }
David S. Miller37437bb2008-07-16 02:15:04 -07001814 spin_unlock(root_lock);
1815
David S. Miller37437bb2008-07-16 02:15:04 -07001816 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 }
1818
1819 /* The device has no queue. Common case for software devices:
1820 loopback, all sorts of tunnels...
1821
Herbert Xu932ff272006-06-09 12:20:56 -07001822 Really, it is unlikely that netif_tx_lock protection is necessary
1823 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 counters.)
1825 However, it is possible that they rely on the protection
1826 we provide here.
1827
1828 Check this and take the lock. It is not prone to deadlocks.
1829 Or take the noqueue qdisc, it is even simpler 8)
1830 */
1831 if (dev->flags & IFF_UP) {
1832 int cpu = smp_processor_id(); /* ok because BHs are off */
1833
David S. Millerc773e842008-07-08 23:13:53 -07001834 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835
David S. Millerc773e842008-07-08 23:13:53 -07001836 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001838 if (!netif_tx_queue_stopped(txq)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 rc = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001840 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001841 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 goto out;
1843 }
1844 }
David S. Millerc773e842008-07-08 23:13:53 -07001845 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 if (net_ratelimit())
1847 printk(KERN_CRIT "Virtual device %s asks to "
1848 "queue packet!\n", dev->name);
1849 } else {
1850 /* Recursion is detected! It is possible,
1851 * unfortunately */
1852 if (net_ratelimit())
1853 printk(KERN_CRIT "Dead loop on virtual device "
1854 "%s, fix it urgently!\n", dev->name);
1855 }
1856 }
1857
1858 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001859 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860
1861out_kfree_skb:
1862 kfree_skb(skb);
1863 return rc;
1864out:
Herbert Xud4828d82006-06-22 02:28:18 -07001865 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 return rc;
1867}
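/*
 * Editorial sketch (not from the original source): a minimal sender as
 * seen from above this function.  Assumes an Ethernet device; the
 * build_my_payload() helper is hypothetical.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	build_my_payload(skb, len);		(fills data, sets skb->protocol)
 *	skb->dev = dev;
 *	if (dev_hard_header(skb, dev, ntohs(skb->protocol),
 *			    dest_mac, dev->dev_addr, skb->len) < 0) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 *	dev_queue_xmit(skb);		(consumes the skb in all cases)
 */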
1868
1869
1870/*=======================================================================
1871 Receiver routines
1872 =======================================================================*/
1873
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001874int netdev_max_backlog __read_mostly = 1000;
1875int netdev_budget __read_mostly = 300;
1876int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877
1878DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1879
1880
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881/**
1882 * netif_rx - post buffer to the network code
1883 * @skb: buffer to post
1884 *
1885 * This function receives a packet from a device driver and queues it for
1886 * the upper (protocol) levels to process. It always succeeds. The buffer
1887 * may be dropped during processing for congestion control or by the
1888 * protocol layers.
1889 *
1890 * return values:
1891 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 * NET_RX_DROP (packet was dropped)
1893 *
1894 */
1895
1896int netif_rx(struct sk_buff *skb)
1897{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 struct softnet_data *queue;
1899 unsigned long flags;
1900
1901 /* if netpoll wants it, pretend we never saw it */
1902 if (netpoll_rx(skb))
1903 return NET_RX_DROP;
1904
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001905 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001906 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907
1908 /*
1909 * The code is rearranged so that the path is shortest
1910 * when the CPU is congested, but it is still operating.
1911 */
1912 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 queue = &__get_cpu_var(softnet_data);
1914
1915 __get_cpu_var(netdev_rx_stat).total++;
1916 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1917 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001921 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 }
1923
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001924 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 goto enqueue;
1926 }
1927
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 __get_cpu_var(netdev_rx_stat).dropped++;
1929 local_irq_restore(flags);
1930
1931 kfree_skb(skb);
1932 return NET_RX_DROP;
1933}
1934
1935int netif_rx_ni(struct sk_buff *skb)
1936{
1937 int err;
1938
1939 preempt_disable();
1940 err = netif_rx(skb);
1941 if (local_softirq_pending())
1942 do_softirq();
1943 preempt_enable();
1944
1945 return err;
1946}
1947
1948EXPORT_SYMBOL(netif_rx_ni);
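/*
 * Editorial note (not from the original source): netif_rx() is the
 * classic entry point for drivers that receive in hard interrupt
 * context, while netif_rx_ni() is for callers already in process
 * context (e.g. a driver's kernel thread), where it kicks any softirq
 * it raised itself instead of waiting for the next interrupt.  A
 * hypothetical rx-thread loop:
 *
 *	while (!kthread_should_stop()) {
 *		skb = my_fetch_frame(priv);	(hypothetical helper)
 *		if (skb) {
 *			skb->protocol = eth_type_trans(skb, dev);
 *			netif_rx_ni(skb);
 *		}
 *	}
 */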
1949
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950static void net_tx_action(struct softirq_action *h)
1951{
1952 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1953
1954 if (sd->completion_queue) {
1955 struct sk_buff *clist;
1956
1957 local_irq_disable();
1958 clist = sd->completion_queue;
1959 sd->completion_queue = NULL;
1960 local_irq_enable();
1961
1962 while (clist) {
1963 struct sk_buff *skb = clist;
1964 clist = clist->next;
1965
Ilpo Järvinen547b7922008-07-25 21:43:18 -07001966 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 __kfree_skb(skb);
1968 }
1969 }
1970
1971 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07001972 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
1974 local_irq_disable();
1975 head = sd->output_queue;
1976 sd->output_queue = NULL;
1977 local_irq_enable();
1978
1979 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07001980 struct Qdisc *q = head;
1981 spinlock_t *root_lock;
1982
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 head = head->next_sched;
1984
David S. Miller5fb66222008-08-02 20:02:43 -07001985 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07001986 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001987 smp_mb__before_clear_bit();
1988 clear_bit(__QDISC_STATE_SCHED,
1989 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07001990 qdisc_run(q);
1991 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 } else {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001993 __netif_reschedule(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 }
1995 }
1996 }
1997}
1998
Stephen Hemminger6f05f622007-03-08 20:46:03 -08001999static inline int deliver_skb(struct sk_buff *skb,
2000 struct packet_type *pt_prev,
2001 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002{
2003 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002004 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005}
2006
2007#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Stephen Hemminger6229e362007-03-21 13:38:47 -07002008/* These hooks are defined here for ATM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009struct net_bridge;
2010struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
2011 unsigned char *addr);
Stephen Hemminger6229e362007-03-21 13:38:47 -07002012void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013
Stephen Hemminger6229e362007-03-21 13:38:47 -07002014/*
2015 * If bridge module is loaded call bridging hook.
2016 * returns NULL if packet was consumed.
2017 */
2018struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2019 struct sk_buff *skb) __read_mostly;
2020static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2021 struct packet_type **pt_prev, int *ret,
2022 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023{
2024 struct net_bridge_port *port;
2025
Stephen Hemminger6229e362007-03-21 13:38:47 -07002026 if (skb->pkt_type == PACKET_LOOPBACK ||
2027 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2028 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029
2030 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002031 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002033 }
2034
Stephen Hemminger6229e362007-03-21 13:38:47 -07002035 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036}
2037#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002038#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039#endif
2040
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002041#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2042struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2043EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2044
2045static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2046 struct packet_type **pt_prev,
2047 int *ret,
2048 struct net_device *orig_dev)
2049{
2050 if (skb->dev->macvlan_port == NULL)
2051 return skb;
2052
2053 if (*pt_prev) {
2054 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2055 *pt_prev = NULL;
2056 }
2057 return macvlan_handle_frame_hook(skb);
2058}
2059#else
2060#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2061#endif
2062
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063#ifdef CONFIG_NET_CLS_ACT
2064/* TODO: Maybe we should just force sch_ingress to be compiled in
2065 * when CONFIG_NET_CLS_ACT is? Otherwise we waste a few instructions
2066 * (a compare and 2 extra stores) right now if we don't have it on
2067 * but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002068 * NOTE: This doesn't stop any functionality; if you don't have
2069 * the ingress scheduler, you just can't add policies on ingress.
2070 *
2071 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002072static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002075 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002076 struct netdev_queue *rxq;
2077 int result = TC_ACT_OK;
2078 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002079
Herbert Xuf697c3e2007-10-14 00:38:47 -07002080 if (MAX_RED_LOOP < ttl++) {
2081 printk(KERN_WARNING
2082 "Redir loop detected Dropping packet (%d->%d)\n",
2083 skb->iif, dev->ifindex);
2084 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 }
2086
Herbert Xuf697c3e2007-10-14 00:38:47 -07002087 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2088 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2089
David S. Miller555353c2008-07-08 17:33:13 -07002090 rxq = &dev->rx_queue;
2091
David S. Miller83874002008-07-17 00:53:03 -07002092 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002093 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002094 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002095 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2096 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002097 spin_unlock(qdisc_lock(q));
2098 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002099
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 return result;
2101}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002102
2103static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2104 struct packet_type **pt_prev,
2105 int *ret, struct net_device *orig_dev)
2106{
David S. Miller8d50b532008-07-30 02:37:46 -07002107 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002108 goto out;
2109
2110 if (*pt_prev) {
2111 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2112 *pt_prev = NULL;
2113 } else {
2114 /* Huh? Why does turning on AF_PACKET affect this? */
2115 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2116 }
2117
2118 switch (ing_filter(skb)) {
2119 case TC_ACT_SHOT:
2120 case TC_ACT_STOLEN:
2121 kfree_skb(skb);
2122 return NULL;
2123 }
2124
2125out:
2126 skb->tc_verd = 0;
2127 return skb;
2128}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129#endif
2130
Patrick McHardybc1d0412008-07-14 22:49:30 -07002131/*
2132 * netif_nit_deliver - deliver received packets to network taps
2133 * @skb: buffer
2134 *
2135 * This function is used to deliver incoming packets to network
2136 * taps. It should be used when the normal netif_receive_skb path
2137 * is bypassed, for example because of VLAN acceleration.
2138 */
2139void netif_nit_deliver(struct sk_buff *skb)
2140{
2141 struct packet_type *ptype;
2142
2143 if (list_empty(&ptype_all))
2144 return;
2145
2146 skb_reset_network_header(skb);
2147 skb_reset_transport_header(skb);
2148 skb->mac_len = skb->network_header - skb->mac_header;
2149
2150 rcu_read_lock();
2151 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2152 if (!ptype->dev || ptype->dev == skb->dev)
2153 deliver_skb(skb, ptype, skb->dev);
2154 }
2155 rcu_read_unlock();
2156}
2157
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002158/**
2159 * netif_receive_skb - process receive buffer from network
2160 * @skb: buffer to process
2161 *
2162 * netif_receive_skb() is the main receive data processing function.
2163 * It always succeeds. The buffer may be dropped during processing
2164 * for congestion control or by the protocol layers.
2165 *
2166 * This function may only be called from softirq context and interrupts
2167 * should be enabled.
2168 *
2169 * Return values (usually ignored):
2170 * NET_RX_SUCCESS: no congestion
2171 * NET_RX_DROP: packet was dropped
2172 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173int netif_receive_skb(struct sk_buff *skb)
2174{
2175 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002176 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002177 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002179 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
2181 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002182 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 return NET_RX_DROP;
2184
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002185 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002186 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
Patrick McHardyc01003c2007-03-29 11:46:52 -07002188 if (!skb->iif)
2189 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002190
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002191 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002192 orig_dev = skb->dev;
2193 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002194 if (skb_bond_should_drop(skb))
2195 null_or_orig = orig_dev; /* deliver only exact match */
2196 else
2197 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002198 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002199
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 __get_cpu_var(netdev_rx_stat).total++;
2201
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002202 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002203 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002204 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
2206 pt_prev = NULL;
2207
2208 rcu_read_lock();
2209
Eric W. Biedermanb9f75f42008-06-20 22:16:51 -07002210 /* Don't receive packets in an exiting network namespace */
2211 if (!net_alive(dev_net(skb->dev)))
2212 goto out;
2213
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214#ifdef CONFIG_NET_CLS_ACT
2215 if (skb->tc_verd & TC_NCLS) {
2216 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2217 goto ncls;
2218 }
2219#endif
2220
2221 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002222 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2223 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002224 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002225 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 pt_prev = ptype;
2227 }
2228 }
2229
2230#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002231 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2232 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234ncls:
2235#endif
2236
Stephen Hemminger6229e362007-03-21 13:38:47 -07002237 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2238 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002240 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2241 if (!skb)
2242 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243
2244 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002245 list_for_each_entry_rcu(ptype,
2246 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002248 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2249 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002250 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002251 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 pt_prev = ptype;
2253 }
2254 }
2255
2256 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002257 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 } else {
2259 kfree_skb(skb);
2260 /* Jamal, now you will not be able to escape explaining
2261 * to me how you were going to use this. :-)
2262 */
2263 ret = NET_RX_DROP;
2264 }
2265
2266out:
2267 rcu_read_unlock();
2268 return ret;
2269}
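/*
 * Editorial sketch (not from the original source): how a NAPI driver's
 * poll method typically feeds this function.  Helper names are
 * hypothetical, and the completion helper named here is the one used by
 * drivers of this kernel generation.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = my_next_rx_skb(priv);
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, priv->dev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget) {
 *			netif_rx_complete(priv->dev, napi);
 *			my_enable_rx_irq(priv);
 *		}
 *		return work;
 *	}
 */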
2270
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002271/* Network device is going away, flush any packets still pending */
2272static void flush_backlog(void *arg)
2273{
2274 struct net_device *dev = arg;
2275 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2276 struct sk_buff *skb, *tmp;
2277
2278 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2279 if (skb->dev == dev) {
2280 __skb_unlink(skb, &queue->input_pkt_queue);
2281 kfree_skb(skb);
2282 }
2283}
2284
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002285static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286{
2287 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2289 unsigned long start_time = jiffies;
2290
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002291 napi->weight = weight_p;
2292 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294
2295 local_irq_disable();
2296 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002297 if (!skb) {
2298 __napi_complete(napi);
2299 local_irq_enable();
2300 break;
2301 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 local_irq_enable();
2303
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002305 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002307 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308}
2309
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002310/**
2311 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002312 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002313 *
2314 * The entry's receive function will be scheduled to run
2315 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002316void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002317{
2318 unsigned long flags;
2319
2320 local_irq_save(flags);
2321 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2322 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2323 local_irq_restore(flags);
2324}
2325EXPORT_SYMBOL(__napi_schedule);
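/*
 * Editorial sketch (not from the original source): the interrupt-side
 * half of NAPI.  Drivers normally go through napi_schedule(), which
 * tests NAPI_STATE_SCHED via napi_schedule_prep() and then calls the
 * __napi_schedule() above.  The my_* names are hypothetical.
 *
 *	static irqreturn_t my_rx_irq(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			my_disable_rx_irq(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */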
2326
2327
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328static void net_rx_action(struct softirq_action *h)
2329{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002330 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 unsigned long start_time = jiffies;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002332 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002333 void *have;
2334
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 local_irq_disable();
2336
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002337 while (!list_empty(list)) {
2338 struct napi_struct *n;
2339 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002341 /* If the softirq window is exhausted then punt.
2342 *
2343 * Note that this is a slight policy change from the
2344 * previous NAPI code, which would allow up to 2
2345 * jiffies to pass before breaking out. The test
2346 * used to be "jiffies - start_time > 1".
2347 */
2348 if (unlikely(budget <= 0 || jiffies != start_time))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 goto softnet_break;
2350
2351 local_irq_enable();
2352
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002353 /* Even though interrupts have been re-enabled, this
2354 * access is safe because interrupts can only add new
2355 * entries to the tail of this list, and only ->poll()
2356 * calls can remove this head entry from the list.
2357 */
2358 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002360 have = netpoll_poll_lock(n);
2361
2362 weight = n->weight;
2363
David S. Miller0a7606c2007-10-29 21:28:47 -07002364 /* This NAPI_STATE_SCHED test is for avoiding a race
2365 * with netpoll's poll_napi(). Only the entity which
2366 * obtains the lock and sees NAPI_STATE_SCHED set will
2367 * actually make the ->poll() call. Therefore we avoid
2368 * accidentally calling ->poll() when NAPI is not scheduled.
2369 */
2370 work = 0;
2371 if (test_bit(NAPI_STATE_SCHED, &n->state))
2372 work = n->poll(n, weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002373
2374 WARN_ON_ONCE(work > weight);
2375
2376 budget -= work;
2377
2378 local_irq_disable();
2379
2380 /* Drivers must not modify the NAPI state if they
2381 * consume the entire weight. In such cases this code
2382 * still "owns" the NAPI instance and therefore can
2383 * move the instance around on the list at-will.
2384 */
David S. Millerfed17f32008-01-07 21:00:40 -08002385 if (unlikely(work == weight)) {
2386 if (unlikely(napi_disable_pending(n)))
2387 __napi_complete(n);
2388 else
2389 list_move_tail(&n->poll_list, list);
2390 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002391
2392 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 }
2394out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002395 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002396
Chris Leechdb217332006-06-17 21:24:58 -07002397#ifdef CONFIG_NET_DMA
2398 /*
2399 * There may not be any more sk_buffs coming right now, so push
2400 * any pending DMA copies to hardware
2401 */
Dan Williamsd379b012007-07-09 11:56:42 -07002402 if (!cpus_empty(net_dma.channel_mask)) {
2403 int chan_idx;
Mike Travis0e12f842008-05-12 21:21:13 +02002404 for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
Dan Williamsd379b012007-07-09 11:56:42 -07002405 struct dma_chan *chan = net_dma.channels[chan_idx];
2406 if (chan)
2407 dma_async_memcpy_issue_pending(chan);
2408 }
Chris Leechdb217332006-06-17 21:24:58 -07002409 }
2410#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002411
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 return;
2413
2414softnet_break:
2415 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2416 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2417 goto out;
2418}
2419
2420static gifconf_func_t * gifconf_list [NPROTO];
2421
2422/**
2423 * register_gifconf - register a SIOCGIF handler
2424 * @family: Address family
2425 * @gifconf: Function handler
2426 *
2427 * Register protocol dependent address dumping routines. The handler
2428 * that is passed must not be freed or reused until it has been replaced
2429 * by another handler.
2430 */
2431int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2432{
2433 if (family >= NPROTO)
2434 return -EINVAL;
2435 gifconf_list[family] = gifconf;
2436 return 0;
2437}
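/*
 * Editorial note (not from the original source): address families hook
 * themselves into SIOCGIFCONF at init time; IPv4, for instance, does
 * (in net/ipv4/devinet.c, assuming the historical handler name):
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * dev_ifconf() below then walks every net_device and lets each
 * registered handler append its addresses to the user buffer.
 */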
2438
2439
2440/*
2441 * Map an interface index to its name (SIOCGIFNAME)
2442 */
2443
2444/*
2445 * We need this ioctl for efficient implementation of the
2446 * if_indextoname() function required by the IPv6 API. Without
2447 * it, we would have to search all the interfaces to find a
2448 * match. --pb
2449 */
2450
Eric W. Biederman881d9662007-09-17 11:56:21 -07002451static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452{
2453 struct net_device *dev;
2454 struct ifreq ifr;
2455
2456 /*
2457 * Fetch the caller's info block.
2458 */
2459
2460 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2461 return -EFAULT;
2462
2463 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002464 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 if (!dev) {
2466 read_unlock(&dev_base_lock);
2467 return -ENODEV;
2468 }
2469
2470 strcpy(ifr.ifr_name, dev->name);
2471 read_unlock(&dev_base_lock);
2472
2473 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2474 return -EFAULT;
2475 return 0;
2476}
2477
2478/*
2479 * Perform a SIOCGIFCONF call. This structure will change
2480 * size eventually, and there is nothing I can do about it.
2481 * Thus we will need a 'compatibility mode'.
2482 */
2483
Eric W. Biederman881d9662007-09-17 11:56:21 -07002484static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485{
2486 struct ifconf ifc;
2487 struct net_device *dev;
2488 char __user *pos;
2489 int len;
2490 int total;
2491 int i;
2492
2493 /*
2494 * Fetch the caller's info block.
2495 */
2496
2497 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2498 return -EFAULT;
2499
2500 pos = ifc.ifc_buf;
2501 len = ifc.ifc_len;
2502
2503 /*
2504 * Loop over the interfaces, and write an info block for each.
2505 */
2506
2507 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002508 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509 for (i = 0; i < NPROTO; i++) {
2510 if (gifconf_list[i]) {
2511 int done;
2512 if (!pos)
2513 done = gifconf_list[i](dev, NULL, 0);
2514 else
2515 done = gifconf_list[i](dev, pos + total,
2516 len - total);
2517 if (done < 0)
2518 return -EFAULT;
2519 total += done;
2520 }
2521 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002522 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523
2524 /*
2525 * All done. Write the updated control block back to the caller.
2526 */
2527 ifc.ifc_len = total;
2528
2529 /*
2530 * Both BSD and Solaris return 0 here, so we do too.
2531 */
2532 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2533}
2534
2535#ifdef CONFIG_PROC_FS
2536/*
2537 * This is invoked by the /proc filesystem handler to display a device
2538 * in detail.
2539 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002541 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542{
Denis V. Luneve372c412007-11-19 22:31:54 -08002543 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002544 loff_t off;
2545 struct net_device *dev;
2546
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002548 if (!*pos)
2549 return SEQ_START_TOKEN;
2550
2551 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002552 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002553 if (off++ == *pos)
2554 return dev;
2555
2556 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557}
2558
2559void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2560{
Denis V. Luneve372c412007-11-19 22:31:54 -08002561 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002563 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002564 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565}
2566
2567void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002568 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569{
2570 read_unlock(&dev_base_lock);
2571}
2572
2573static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2574{
Rusty Russellc45d2862007-03-28 14:29:08 -07002575 struct net_device_stats *stats = dev->get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576
Rusty Russell5a1b5892007-04-28 21:04:03 -07002577 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2578 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2579 dev->name, stats->rx_bytes, stats->rx_packets,
2580 stats->rx_errors,
2581 stats->rx_dropped + stats->rx_missed_errors,
2582 stats->rx_fifo_errors,
2583 stats->rx_length_errors + stats->rx_over_errors +
2584 stats->rx_crc_errors + stats->rx_frame_errors,
2585 stats->rx_compressed, stats->multicast,
2586 stats->tx_bytes, stats->tx_packets,
2587 stats->tx_errors, stats->tx_dropped,
2588 stats->tx_fifo_errors, stats->collisions,
2589 stats->tx_carrier_errors +
2590 stats->tx_aborted_errors +
2591 stats->tx_window_errors +
2592 stats->tx_heartbeat_errors,
2593 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594}
2595
2596/*
2597 * Called from the PROCfs module. This now uses the new arbitrary sized
2598 * /proc/net interface to create /proc/net/dev
2599 */
2600static int dev_seq_show(struct seq_file *seq, void *v)
2601{
2602 if (v == SEQ_START_TOKEN)
2603 seq_puts(seq, "Inter-| Receive "
2604 " | Transmit\n"
2605 " face |bytes packets errs drop fifo frame "
2606 "compressed multicast|bytes packets errs "
2607 "drop fifo colls carrier compressed\n");
2608 else
2609 dev_seq_printf_stats(seq, v);
2610 return 0;
2611}
2612
2613static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2614{
2615 struct netif_rx_stats *rc = NULL;
2616
Mike Travis0c0b0ac2008-05-02 16:43:08 -07002617 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002618 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619 rc = &per_cpu(netdev_rx_stat, *pos);
2620 break;
2621 } else
2622 ++*pos;
2623 return rc;
2624}
2625
2626static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2627{
2628 return softnet_get_online(pos);
2629}
2630
2631static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2632{
2633 ++*pos;
2634 return softnet_get_online(pos);
2635}
2636
2637static void softnet_seq_stop(struct seq_file *seq, void *v)
2638{
2639}
2640
2641static int softnet_seq_show(struct seq_file *seq, void *v)
2642{
2643 struct netif_rx_stats *s = v;
2644
2645 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07002646 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07002647 0, 0, 0, 0, /* was fastroute */
2648 s->cpu_collision );
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649 return 0;
2650}
2651
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002652static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 .start = dev_seq_start,
2654 .next = dev_seq_next,
2655 .stop = dev_seq_stop,
2656 .show = dev_seq_show,
2657};
2658
2659static int dev_seq_open(struct inode *inode, struct file *file)
2660{
Denis V. Luneve372c412007-11-19 22:31:54 -08002661 return seq_open_net(inode, file, &dev_seq_ops,
2662 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663}
2664
Arjan van de Ven9a321442007-02-12 00:55:35 -08002665static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666 .owner = THIS_MODULE,
2667 .open = dev_seq_open,
2668 .read = seq_read,
2669 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08002670 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671};
2672
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002673static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 .start = softnet_seq_start,
2675 .next = softnet_seq_next,
2676 .stop = softnet_seq_stop,
2677 .show = softnet_seq_show,
2678};
2679
2680static int softnet_seq_open(struct inode *inode, struct file *file)
2681{
2682 return seq_open(file, &softnet_seq_ops);
2683}
2684
Arjan van de Ven9a321442007-02-12 00:55:35 -08002685static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686 .owner = THIS_MODULE,
2687 .open = softnet_seq_open,
2688 .read = seq_read,
2689 .llseek = seq_lseek,
2690 .release = seq_release,
2691};
2692
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002693static void *ptype_get_idx(loff_t pos)
2694{
2695 struct packet_type *pt = NULL;
2696 loff_t i = 0;
2697 int t;
2698
2699 list_for_each_entry_rcu(pt, &ptype_all, list) {
2700 if (i == pos)
2701 return pt;
2702 ++i;
2703 }
2704
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002705 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002706 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2707 if (i == pos)
2708 return pt;
2709 ++i;
2710 }
2711 }
2712 return NULL;
2713}
2714
2715static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002716 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002717{
2718 rcu_read_lock();
2719 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2720}
2721
2722static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2723{
2724 struct packet_type *pt;
2725 struct list_head *nxt;
2726 int hash;
2727
2728 ++*pos;
2729 if (v == SEQ_START_TOKEN)
2730 return ptype_get_idx(0);
2731
2732 pt = v;
2733 nxt = pt->list.next;
2734 if (pt->type == htons(ETH_P_ALL)) {
2735 if (nxt != &ptype_all)
2736 goto found;
2737 hash = 0;
2738 nxt = ptype_base[0].next;
2739 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002740 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002741
2742 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002743 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002744 return NULL;
2745 nxt = ptype_base[hash].next;
2746 }
2747found:
2748 return list_entry(nxt, struct packet_type, list);
2749}
2750
2751static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002752 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002753{
2754 rcu_read_unlock();
2755}
2756
2757static void ptype_seq_decode(struct seq_file *seq, void *sym)
2758{
2759#ifdef CONFIG_KALLSYMS
2760 unsigned long offset = 0, symsize;
2761 const char *symname;
2762 char *modname;
2763 char namebuf[128];
2764
2765 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2766 &modname, namebuf);
2767
2768 if (symname) {
2769 char *delim = ":";
2770
2771 if (!modname)
2772 modname = delim = "";
2773 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2774 symname, offset);
2775 return;
2776 }
2777#endif
2778
2779 seq_printf(seq, "[%p]", sym);
2780}
2781
2782static int ptype_seq_show(struct seq_file *seq, void *v)
2783{
2784 struct packet_type *pt = v;
2785
2786 if (v == SEQ_START_TOKEN)
2787 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002788 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002789 if (pt->type == htons(ETH_P_ALL))
2790 seq_puts(seq, "ALL ");
2791 else
2792 seq_printf(seq, "%04x", ntohs(pt->type));
2793
2794 seq_printf(seq, " %-8s ",
2795 pt->dev ? pt->dev->name : "");
2796 ptype_seq_decode(seq, pt->func);
2797 seq_putc(seq, '\n');
2798 }
2799
2800 return 0;
2801}
2802
2803static const struct seq_operations ptype_seq_ops = {
2804 .start = ptype_seq_start,
2805 .next = ptype_seq_next,
2806 .stop = ptype_seq_stop,
2807 .show = ptype_seq_show,
2808};
2809
2810static int ptype_seq_open(struct inode *inode, struct file *file)
2811{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002812 return seq_open_net(inode, file, &ptype_seq_ops,
2813 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002814}
2815
2816static const struct file_operations ptype_seq_fops = {
2817 .owner = THIS_MODULE,
2818 .open = ptype_seq_open,
2819 .read = seq_read,
2820 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002821 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002822};
2823
2824
Pavel Emelyanov46650792007-10-08 20:38:39 -07002825static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826{
2827 int rc = -ENOMEM;
2828
Eric W. Biederman881d9662007-09-17 11:56:21 -07002829 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002831 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002833 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002834 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002835
Eric W. Biederman881d9662007-09-17 11:56:21 -07002836 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002837 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 rc = 0;
2839out:
2840 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002841out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002842 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002844 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002846 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 goto out;
2848}
Eric W. Biederman881d9662007-09-17 11:56:21 -07002849
Pavel Emelyanov46650792007-10-08 20:38:39 -07002850static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07002851{
2852 wext_proc_exit(net);
2853
2854 proc_net_remove(net, "ptype");
2855 proc_net_remove(net, "softnet_stat");
2856 proc_net_remove(net, "dev");
2857}
2858
Denis V. Lunev022cbae2007-11-13 03:23:50 -08002859static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07002860 .init = dev_proc_net_init,
2861 .exit = dev_proc_net_exit,
2862};
2863
2864static int __init dev_proc_init(void)
2865{
2866 return register_pernet_subsys(&dev_proc_ops);
2867}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868#else
2869#define dev_proc_init() 0
2870#endif /* CONFIG_PROC_FS */
2871
2872
2873/**
2874 * netdev_set_master - set up master/slave pair
2875 * @slave: slave device
2876 * @master: new master device
2877 *
2878 * Changes the master device of the slave. Pass %NULL to break the
2879 * bonding. The caller must hold the RTNL semaphore. On a failure
2880 * a negative errno code is returned. On success the reference counts
2881 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2882 * function returns zero.
2883 */
2884int netdev_set_master(struct net_device *slave, struct net_device *master)
2885{
2886 struct net_device *old = slave->master;
2887
2888 ASSERT_RTNL();
2889
2890 if (master) {
2891 if (old)
2892 return -EBUSY;
2893 dev_hold(master);
2894 }
2895
2896 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002897
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898 synchronize_net();
2899
2900 if (old)
2901 dev_put(old);
2902
2903 if (master)
2904 slave->flags |= IFF_SLAVE;
2905 else
2906 slave->flags &= ~IFF_SLAVE;
2907
2908 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2909 return 0;
2910}
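/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how a bonding-style driver might use netdev_set_master() when enslaving
 * and releasing a device.  "example_enslave"/"example_release" and the
 * device names are hypothetical; the caller is assumed to already hold the
 * RTNL semaphore, as the kernel-doc above requires.
 */
static int example_enslave(struct net_device *bond_dev,
                           struct net_device *slave_dev)
{
        int err;

        ASSERT_RTNL();

        err = netdev_set_master(slave_dev, bond_dev);   /* take ownership */
        if (err)
                return err;     /* e.g. -EBUSY if already enslaved */
        /* ... program the slave here ... */
        return 0;
}

static void example_release(struct net_device *slave_dev)
{
        ASSERT_RTNL();
        netdev_set_master(slave_dev, NULL);     /* break the pairing */
}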
2911
Wang Chendad9b332008-06-18 01:48:28 -07002912static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07002913{
2914 unsigned short old_flags = dev->flags;
2915
Patrick McHardy24023452007-07-14 18:51:31 -07002916 ASSERT_RTNL();
2917
Wang Chendad9b332008-06-18 01:48:28 -07002918 dev->flags |= IFF_PROMISC;
2919 dev->promiscuity += inc;
2920 if (dev->promiscuity == 0) {
2921 /*
2922 * Avoid overflow.
2923			 * If inc causes overflow, leave promisc untouched and return an error.
2924 */
2925 if (inc < 0)
2926 dev->flags &= ~IFF_PROMISC;
2927 else {
2928 dev->promiscuity -= inc;
2929 printk(KERN_WARNING "%s: promiscuity touches roof, "
2930 "set promiscuity failed, promiscuity feature "
2931 "of device might be broken.\n", dev->name);
2932 return -EOVERFLOW;
2933 }
2934 }
Patrick McHardy4417da62007-06-27 01:28:10 -07002935 if (dev->flags != old_flags) {
2936 printk(KERN_INFO "device %s %s promiscuous mode\n",
2937 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2938 "left");
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05002939 if (audit_enabled)
2940 audit_log(current->audit_context, GFP_ATOMIC,
2941 AUDIT_ANOM_PROMISCUOUS,
2942 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2943 dev->name, (dev->flags & IFF_PROMISC),
2944 (old_flags & IFF_PROMISC),
2945 audit_get_loginuid(current),
2946 current->uid, current->gid,
2947 audit_get_sessionid(current));
Patrick McHardy24023452007-07-14 18:51:31 -07002948
2949 if (dev->change_rx_flags)
2950 dev->change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07002951 }
Wang Chendad9b332008-06-18 01:48:28 -07002952 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07002953}
2954
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955/**
2956 * dev_set_promiscuity - update promiscuity count on a device
2957 * @dev: device
2958 * @inc: modifier
2959 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07002960 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961 * remains above zero the interface remains promiscuous. Once it hits zero
2962 * the device reverts back to normal filtering operation. A negative inc
2963 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07002964 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965 */
Wang Chendad9b332008-06-18 01:48:28 -07002966int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967{
2968 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07002969 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970
Wang Chendad9b332008-06-18 01:48:28 -07002971 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07002972 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07002973 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07002974 if (dev->flags != old_flags)
2975 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07002976 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977}
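/*
 * Illustrative sketch (editorial addition): a packet tap that needs to see
 * every frame bumps the promiscuity count while it is attached and drops it
 * again on detach.  The +1/-1 pairs must balance; the device only leaves
 * promiscuous mode when the count returns to zero.  Both calls must run
 * under rtnl_lock().  "tap_attach"/"tap_detach" are hypothetical helpers.
 */
static int tap_attach(struct net_device *dev)
{
        return dev_set_promiscuity(dev, 1);     /* may return -EOVERFLOW */
}

static void tap_detach(struct net_device *dev)
{
        dev_set_promiscuity(dev, -1);
}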
2978
2979/**
2980 * dev_set_allmulti - update allmulti count on a device
2981 * @dev: device
2982 * @inc: modifier
2983 *
2984 * Add or remove reception of all multicast frames to a device. While the
2985 * count in the device remains above zero the interface remains listening
2986 * to all interfaces. Once it hits zero the device reverts back to normal
2987 * filtering operation. A negative @inc value is used to drop the counter
2988 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07002989 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002990 */
2991
Wang Chendad9b332008-06-18 01:48:28 -07002992int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993{
2994 unsigned short old_flags = dev->flags;
2995
Patrick McHardy24023452007-07-14 18:51:31 -07002996 ASSERT_RTNL();
2997
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07002999 dev->allmulti += inc;
3000 if (dev->allmulti == 0) {
3001 /*
3002 * Avoid overflow.
3003		 * If inc causes overflow, leave allmulti untouched and return an error.
3004 */
3005 if (inc < 0)
3006 dev->flags &= ~IFF_ALLMULTI;
3007 else {
3008 dev->allmulti -= inc;
3009 printk(KERN_WARNING "%s: allmulti touches roof, "
3010 "set allmulti failed, allmulti feature of "
3011 "device might be broken.\n", dev->name);
3012 return -EOVERFLOW;
3013 }
3014 }
Patrick McHardy24023452007-07-14 18:51:31 -07003015 if (dev->flags ^ old_flags) {
3016 if (dev->change_rx_flags)
3017 dev->change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003018 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003019 }
Wang Chendad9b332008-06-18 01:48:28 -07003020 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003021}
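/*
 * Illustrative sketch (editorial addition): a component that must receive
 * every multicast frame (e.g. a hypothetical multicast routing hook) holds
 * an allmulti reference for as long as it needs it.  As with promiscuity,
 * the increments must balance and rtnl must be held.
 */
static int mroute_start(struct net_device *dev)
{
        return dev_set_allmulti(dev, 1);
}

static void mroute_stop(struct net_device *dev)
{
        dev_set_allmulti(dev, -1);
}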
3022
3023/*
3024 * Upload unicast and multicast address lists to device and
3025 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003026 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003027 * are present.
3028 */
3029void __dev_set_rx_mode(struct net_device *dev)
3030{
3031 /* dev_open will call this function so the list will stay sane. */
3032 if (!(dev->flags&IFF_UP))
3033 return;
3034
3035 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003036 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003037
3038 if (dev->set_rx_mode)
3039 dev->set_rx_mode(dev);
3040 else {
3041		/* Unicast address changes may only happen under the rtnl,
3042 * therefore calling __dev_set_promiscuity here is safe.
3043 */
3044 if (dev->uc_count > 0 && !dev->uc_promisc) {
3045 __dev_set_promiscuity(dev, 1);
3046 dev->uc_promisc = 1;
3047 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3048 __dev_set_promiscuity(dev, -1);
3049 dev->uc_promisc = 0;
3050 }
3051
3052 if (dev->set_multicast_list)
3053 dev->set_multicast_list(dev);
3054 }
3055}
3056
3057void dev_set_rx_mode(struct net_device *dev)
3058{
David S. Millerb9e40852008-07-15 00:15:08 -07003059 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003060 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003061 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062}
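/*
 * Illustrative sketch (editorial addition): the rough shape of a driver's
 * ->set_rx_mode callback as invoked by __dev_set_rx_mode() above.  The
 * hardware hooks (foo_hw_set_promisc() etc.) are hypothetical; the point is
 * that the callback runs with the address list lock held and simply mirrors
 * dev->flags and dev->mc_list into the hardware filter.
 */
static void foo_set_rx_mode(struct net_device *dev)
{
        struct dev_addr_list *mc;

        foo_hw_set_promisc(dev, dev->flags & IFF_PROMISC);
        foo_hw_set_allmulti(dev, dev->flags & IFF_ALLMULTI);

        foo_hw_clear_mc_filter(dev);
        for (mc = dev->mc_list; mc; mc = mc->next)
                foo_hw_add_mc_filter(dev, mc->da_addr, mc->da_addrlen);
}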
3063
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003064int __dev_addr_delete(struct dev_addr_list **list, int *count,
3065 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003066{
3067 struct dev_addr_list *da;
3068
3069 for (; (da = *list) != NULL; list = &da->next) {
3070 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3071 alen == da->da_addrlen) {
3072 if (glbl) {
3073 int old_glbl = da->da_gusers;
3074 da->da_gusers = 0;
3075 if (old_glbl == 0)
3076 break;
3077 }
3078 if (--da->da_users)
3079 return 0;
3080
3081 *list = da->next;
3082 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003083 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003084 return 0;
3085 }
3086 }
3087 return -ENOENT;
3088}
3089
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003090int __dev_addr_add(struct dev_addr_list **list, int *count,
3091 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003092{
3093 struct dev_addr_list *da;
3094
3095 for (da = *list; da != NULL; da = da->next) {
3096 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3097 da->da_addrlen == alen) {
3098 if (glbl) {
3099 int old_glbl = da->da_gusers;
3100 da->da_gusers = 1;
3101 if (old_glbl)
3102 return 0;
3103 }
3104 da->da_users++;
3105 return 0;
3106 }
3107 }
3108
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003109 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003110 if (da == NULL)
3111 return -ENOMEM;
3112 memcpy(da->da_addr, addr, alen);
3113 da->da_addrlen = alen;
3114 da->da_users = 1;
3115 da->da_gusers = glbl ? 1 : 0;
3116 da->next = *list;
3117 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003118 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003119 return 0;
3120}
3121
Patrick McHardy4417da62007-06-27 01:28:10 -07003122/**
3123 * dev_unicast_delete - Release secondary unicast address.
3124 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003125 * @addr: address to delete
3126 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003127 *
3128 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003129 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003130 *
3131 * The caller must hold the rtnl_mutex.
3132 */
3133int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3134{
3135 int err;
3136
3137 ASSERT_RTNL();
3138
David S. Millerb9e40852008-07-15 00:15:08 -07003139 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003140 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3141 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003142 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003143 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003144 return err;
3145}
3146EXPORT_SYMBOL(dev_unicast_delete);
3147
3148/**
3149 * dev_unicast_add - add a secondary unicast address
3150 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003151 * @addr: address to add
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003152 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003153 *
3154 * Add a secondary unicast address to the device or increase
3155 * the reference count if it already exists.
3156 *
3157 * The caller must hold the rtnl_mutex.
3158 */
3159int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3160{
3161 int err;
3162
3163 ASSERT_RTNL();
3164
David S. Millerb9e40852008-07-15 00:15:08 -07003165 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003166 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3167 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003168 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003169 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003170 return err;
3171}
3172EXPORT_SYMBOL(dev_unicast_add);
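/*
 * Illustrative sketch (editorial addition) covering both dev_unicast_add()
 * and dev_unicast_delete(): a stacked device claiming a secondary MAC
 * address on its lower device and releasing it again on teardown.  Both
 * calls require the rtnl_mutex, as documented above; "lower" and the helper
 * names are hypothetical.
 */
static int example_claim_addr(struct net_device *lower, u8 *addr)
{
        ASSERT_RTNL();
        return dev_unicast_add(lower, addr, ETH_ALEN);
}

static void example_release_addr(struct net_device *lower, u8 *addr)
{
        ASSERT_RTNL();
        dev_unicast_delete(lower, addr, ETH_ALEN);
}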
3173
Chris Leeche83a2ea2008-01-31 16:53:23 -08003174int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3175 struct dev_addr_list **from, int *from_count)
3176{
3177 struct dev_addr_list *da, *next;
3178 int err = 0;
3179
3180 da = *from;
3181 while (da != NULL) {
3182 next = da->next;
3183 if (!da->da_synced) {
3184 err = __dev_addr_add(to, to_count,
3185 da->da_addr, da->da_addrlen, 0);
3186 if (err < 0)
3187 break;
3188 da->da_synced = 1;
3189 da->da_users++;
3190 } else if (da->da_users == 1) {
3191 __dev_addr_delete(to, to_count,
3192 da->da_addr, da->da_addrlen, 0);
3193 __dev_addr_delete(from, from_count,
3194 da->da_addr, da->da_addrlen, 0);
3195 }
3196 da = next;
3197 }
3198 return err;
3199}
3200
3201void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3202 struct dev_addr_list **from, int *from_count)
3203{
3204 struct dev_addr_list *da, *next;
3205
3206 da = *from;
3207 while (da != NULL) {
3208 next = da->next;
3209 if (da->da_synced) {
3210 __dev_addr_delete(to, to_count,
3211 da->da_addr, da->da_addrlen, 0);
3212 da->da_synced = 0;
3213 __dev_addr_delete(from, from_count,
3214 da->da_addr, da->da_addrlen, 0);
3215 }
3216 da = next;
3217 }
3218}
3219
3220/**
3221 * dev_unicast_sync - Synchronize device's unicast list to another device
3222 * @to: destination device
3223 * @from: source device
3224 *
3225 * Add newly added addresses to the destination device and release
3226 * addresses that have no users left. The source device must be
3227 * locked by netif_tx_lock_bh.
3228 *
3229 * This function is intended to be called from the dev->set_rx_mode
3230 * function of layered software devices.
3231 */
3232int dev_unicast_sync(struct net_device *to, struct net_device *from)
3233{
3234 int err = 0;
3235
David S. Millerb9e40852008-07-15 00:15:08 -07003236 netif_addr_lock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003237 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3238 &from->uc_list, &from->uc_count);
3239 if (!err)
3240 __dev_set_rx_mode(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003241 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003242 return err;
3243}
3244EXPORT_SYMBOL(dev_unicast_sync);
3245
3246/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003247 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003248 * @to: destination device
3249 * @from: source device
3250 *
3251 * Remove all addresses that were added to the destination device by
3252 * dev_unicast_sync(). This function is intended to be called from the
3253 * dev->stop function of layered software devices.
3254 */
3255void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3256{
David S. Millerb9e40852008-07-15 00:15:08 -07003257 netif_addr_lock_bh(from);
David S. Millere308a5d2008-07-15 00:13:44 -07003258 netif_addr_lock(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003259
3260 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3261 &from->uc_list, &from->uc_count);
3262 __dev_set_rx_mode(to);
3263
David S. Millere308a5d2008-07-15 00:13:44 -07003264 netif_addr_unlock(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003265 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003266}
3267EXPORT_SYMBOL(dev_unicast_unsync);
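/*
 * Illustrative sketch (editorial addition): how a layered device such as a
 * VLAN might wire these two helpers up, following the kernel-doc above.
 * "upper" is the layered device, "lower" its real device; both names and
 * the helpers are hypothetical, and error handling is omitted.
 */
static void example_upper_set_rx_mode(struct net_device *upper,
                                      struct net_device *lower)
{
        dev_unicast_sync(lower, upper);         /* push new secondary unicasts down */
}

static void example_upper_stop(struct net_device *upper,
                               struct net_device *lower)
{
        dev_unicast_unsync(lower, upper);       /* drop everything we synced */
}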
3268
Denis Cheng12972622007-07-18 02:12:56 -07003269static void __dev_addr_discard(struct dev_addr_list **list)
3270{
3271 struct dev_addr_list *tmp;
3272
3273 while (*list != NULL) {
3274 tmp = *list;
3275 *list = tmp->next;
3276 if (tmp->da_users > tmp->da_gusers)
3277 printk("__dev_addr_discard: address leakage! "
3278 "da_users=%d\n", tmp->da_users);
3279 kfree(tmp);
3280 }
3281}
3282
Denis Cheng26cc2522007-07-18 02:12:03 -07003283static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07003284{
David S. Millerb9e40852008-07-15 00:15:08 -07003285 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07003286
Patrick McHardy4417da62007-06-27 01:28:10 -07003287 __dev_addr_discard(&dev->uc_list);
3288 dev->uc_count = 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003289
Denis Cheng456ad752007-07-18 02:10:54 -07003290 __dev_addr_discard(&dev->mc_list);
3291 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07003292
David S. Millerb9e40852008-07-15 00:15:08 -07003293 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07003294}
3295
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296unsigned dev_get_flags(const struct net_device *dev)
3297{
3298 unsigned flags;
3299
3300 flags = (dev->flags & ~(IFF_PROMISC |
3301 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08003302 IFF_RUNNING |
3303 IFF_LOWER_UP |
3304 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07003305 (dev->gflags & (IFF_PROMISC |
3306 IFF_ALLMULTI));
3307
Stefan Rompfb00055a2006-03-20 17:09:11 -08003308 if (netif_running(dev)) {
3309 if (netif_oper_up(dev))
3310 flags |= IFF_RUNNING;
3311 if (netif_carrier_ok(dev))
3312 flags |= IFF_LOWER_UP;
3313 if (netif_dormant(dev))
3314 flags |= IFF_DORMANT;
3315 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316
3317 return flags;
3318}
3319
3320int dev_change_flags(struct net_device *dev, unsigned flags)
3321{
Thomas Graf7c355f52007-06-05 16:03:03 -07003322 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 int old_flags = dev->flags;
3324
Patrick McHardy24023452007-07-14 18:51:31 -07003325 ASSERT_RTNL();
3326
Linus Torvalds1da177e2005-04-16 15:20:36 -07003327 /*
3328 * Set the flags on our device.
3329 */
3330
3331 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3332 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3333 IFF_AUTOMEDIA)) |
3334 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3335 IFF_ALLMULTI));
3336
3337 /*
3338 * Load in the correct multicast list now the flags have changed.
3339 */
3340
David Woodhouse0e917962008-05-20 14:36:14 -07003341 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
Patrick McHardy24023452007-07-14 18:51:31 -07003342 dev->change_rx_flags(dev, IFF_MULTICAST);
3343
Patrick McHardy4417da62007-06-27 01:28:10 -07003344 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345
3346 /*
3347	 * Have we downed the interface? We handle IFF_UP ourselves
3348 * according to user attempts to set it, rather than blindly
3349 * setting it.
3350 */
3351
3352 ret = 0;
3353 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3354 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3355
3356 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07003357 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358 }
3359
3360 if (dev->flags & IFF_UP &&
3361 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3362 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003363 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364
3365 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3366 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3367 dev->gflags ^= IFF_PROMISC;
3368 dev_set_promiscuity(dev, inc);
3369 }
3370
3371 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3372	   is important. Some (broken) drivers set IFF_PROMISC when
3373	   IFF_ALLMULTI is requested, without asking us and without reporting it.
3374 */
3375 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3376 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3377 dev->gflags ^= IFF_ALLMULTI;
3378 dev_set_allmulti(dev, inc);
3379 }
3380
Thomas Graf7c355f52007-06-05 16:03:03 -07003381 /* Exclude state transition flags, already notified */
3382 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3383 if (changes)
3384 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385
3386 return ret;
3387}
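/*
 * Illustrative sketch (editorial addition): bringing an interface
 * administratively up from kernel code by going through dev_change_flags(),
 * which takes care of calling dev_open()/dev_close() and of sending the
 * RTM_NEWLINK notification.  Must run under rtnl_lock(); "example_bring_up"
 * is a hypothetical helper.
 */
static int example_bring_up(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_change_flags(dev, dev->flags | IFF_UP);
        rtnl_unlock();
        return err;
}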
3388
3389int dev_set_mtu(struct net_device *dev, int new_mtu)
3390{
3391 int err;
3392
3393 if (new_mtu == dev->mtu)
3394 return 0;
3395
3396 /* MTU must be positive. */
3397 if (new_mtu < 0)
3398 return -EINVAL;
3399
3400 if (!netif_device_present(dev))
3401 return -ENODEV;
3402
3403 err = 0;
3404 if (dev->change_mtu)
3405 err = dev->change_mtu(dev, new_mtu);
3406 else
3407 dev->mtu = new_mtu;
3408 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003409 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410 return err;
3411}
3412
3413int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3414{
3415 int err;
3416
3417 if (!dev->set_mac_address)
3418 return -EOPNOTSUPP;
3419 if (sa->sa_family != dev->type)
3420 return -EINVAL;
3421 if (!netif_device_present(dev))
3422 return -ENODEV;
3423 err = dev->set_mac_address(dev, sa);
3424 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003425 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426 return err;
3427}
3428
3429/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07003430 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07003432static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433{
3434 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003435 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436
3437 if (!dev)
3438 return -ENODEV;
3439
3440 switch (cmd) {
3441 case SIOCGIFFLAGS: /* Get interface flags */
3442 ifr->ifr_flags = dev_get_flags(dev);
3443 return 0;
3444
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 case SIOCGIFMETRIC: /* Get the metric on the interface
3446 (currently unused) */
3447 ifr->ifr_metric = 0;
3448 return 0;
3449
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 case SIOCGIFMTU: /* Get the MTU of a device */
3451 ifr->ifr_mtu = dev->mtu;
3452 return 0;
3453
Linus Torvalds1da177e2005-04-16 15:20:36 -07003454 case SIOCGIFHWADDR:
3455 if (!dev->addr_len)
3456 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3457 else
3458 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3459 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3460 ifr->ifr_hwaddr.sa_family = dev->type;
3461 return 0;
3462
Jeff Garzik14e3e072007-10-08 00:06:32 -07003463 case SIOCGIFSLAVE:
3464 err = -EINVAL;
3465 break;
3466
3467 case SIOCGIFMAP:
3468 ifr->ifr_map.mem_start = dev->mem_start;
3469 ifr->ifr_map.mem_end = dev->mem_end;
3470 ifr->ifr_map.base_addr = dev->base_addr;
3471 ifr->ifr_map.irq = dev->irq;
3472 ifr->ifr_map.dma = dev->dma;
3473 ifr->ifr_map.port = dev->if_port;
3474 return 0;
3475
3476 case SIOCGIFINDEX:
3477 ifr->ifr_ifindex = dev->ifindex;
3478 return 0;
3479
3480 case SIOCGIFTXQLEN:
3481 ifr->ifr_qlen = dev->tx_queue_len;
3482 return 0;
3483
3484 default:
3485 /* dev_ioctl() should ensure this case
3486 * is never reached
3487 */
3488 WARN_ON(1);
3489 err = -EINVAL;
3490 break;
3491
3492 }
3493 return err;
3494}
3495
3496/*
3497 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3498 */
3499static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3500{
3501 int err;
3502 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3503
3504 if (!dev)
3505 return -ENODEV;
3506
3507 switch (cmd) {
3508 case SIOCSIFFLAGS: /* Set interface flags */
3509 return dev_change_flags(dev, ifr->ifr_flags);
3510
3511 case SIOCSIFMETRIC: /* Set the metric on the interface
3512 (currently unused) */
3513 return -EOPNOTSUPP;
3514
3515 case SIOCSIFMTU: /* Set the MTU of a device */
3516 return dev_set_mtu(dev, ifr->ifr_mtu);
3517
Linus Torvalds1da177e2005-04-16 15:20:36 -07003518 case SIOCSIFHWADDR:
3519 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3520
3521 case SIOCSIFHWBROADCAST:
3522 if (ifr->ifr_hwaddr.sa_family != dev->type)
3523 return -EINVAL;
3524 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3525 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003526 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527 return 0;
3528
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529 case SIOCSIFMAP:
3530 if (dev->set_config) {
3531 if (!netif_device_present(dev))
3532 return -ENODEV;
3533 return dev->set_config(dev, &ifr->ifr_map);
3534 }
3535 return -EOPNOTSUPP;
3536
3537 case SIOCADDMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003538 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3540 return -EINVAL;
3541 if (!netif_device_present(dev))
3542 return -ENODEV;
3543 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3544 dev->addr_len, 1);
3545
3546 case SIOCDELMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003547 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3549 return -EINVAL;
3550 if (!netif_device_present(dev))
3551 return -ENODEV;
3552 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3553 dev->addr_len, 1);
3554
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555 case SIOCSIFTXQLEN:
3556 if (ifr->ifr_qlen < 0)
3557 return -EINVAL;
3558 dev->tx_queue_len = ifr->ifr_qlen;
3559 return 0;
3560
3561 case SIOCSIFNAME:
3562 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3563 return dev_change_name(dev, ifr->ifr_newname);
3564
3565 /*
3566 * Unknown or private ioctl
3567 */
3568
3569 default:
3570 if ((cmd >= SIOCDEVPRIVATE &&
3571 cmd <= SIOCDEVPRIVATE + 15) ||
3572 cmd == SIOCBONDENSLAVE ||
3573 cmd == SIOCBONDRELEASE ||
3574 cmd == SIOCBONDSETHWADDR ||
3575 cmd == SIOCBONDSLAVEINFOQUERY ||
3576 cmd == SIOCBONDINFOQUERY ||
3577 cmd == SIOCBONDCHANGEACTIVE ||
3578 cmd == SIOCGMIIPHY ||
3579 cmd == SIOCGMIIREG ||
3580 cmd == SIOCSMIIREG ||
3581 cmd == SIOCBRADDIF ||
3582 cmd == SIOCBRDELIF ||
3583 cmd == SIOCWANDEV) {
3584 err = -EOPNOTSUPP;
3585 if (dev->do_ioctl) {
3586 if (netif_device_present(dev))
3587 err = dev->do_ioctl(dev, ifr,
3588 cmd);
3589 else
3590 err = -ENODEV;
3591 }
3592 } else
3593 err = -EINVAL;
3594
3595 }
3596 return err;
3597}
3598
3599/*
3600 * This function handles all "interface"-type I/O control requests. The actual
3601 * 'doing' part of this is dev_ifsioc above.
3602 */
3603
3604/**
3605 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003606 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607 * @cmd: command to issue
3608 * @arg: pointer to a struct ifreq in user space
3609 *
3610 * Issue ioctl functions to devices. This is normally called by the
3611 * user space syscall interfaces but can sometimes be useful for
3612 * other purposes. The return value is the return from the syscall if
3613 * positive or a negative errno code on error.
3614 */
3615
Eric W. Biederman881d9662007-09-17 11:56:21 -07003616int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003617{
3618 struct ifreq ifr;
3619 int ret;
3620 char *colon;
3621
3622 /* One special case: SIOCGIFCONF takes ifconf argument
3623 and requires shared lock, because it sleeps writing
3624 to user space.
3625 */
3626
3627 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003628 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003629 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003630 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631 return ret;
3632 }
3633 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003634 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003635
3636 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3637 return -EFAULT;
3638
3639 ifr.ifr_name[IFNAMSIZ-1] = 0;
3640
3641 colon = strchr(ifr.ifr_name, ':');
3642 if (colon)
3643 *colon = 0;
3644
3645 /*
3646 * See which interface the caller is talking about.
3647 */
3648
3649 switch (cmd) {
3650 /*
3651 * These ioctl calls:
3652 * - can be done by all.
3653 * - atomic and do not require locking.
3654 * - return a value
3655 */
3656 case SIOCGIFFLAGS:
3657 case SIOCGIFMETRIC:
3658 case SIOCGIFMTU:
3659 case SIOCGIFHWADDR:
3660 case SIOCGIFSLAVE:
3661 case SIOCGIFMAP:
3662 case SIOCGIFINDEX:
3663 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003664 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003665 read_lock(&dev_base_lock);
Jeff Garzik14e3e072007-10-08 00:06:32 -07003666 ret = dev_ifsioc_locked(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667 read_unlock(&dev_base_lock);
3668 if (!ret) {
3669 if (colon)
3670 *colon = ':';
3671 if (copy_to_user(arg, &ifr,
3672 sizeof(struct ifreq)))
3673 ret = -EFAULT;
3674 }
3675 return ret;
3676
3677 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003678 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003680 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003681 rtnl_unlock();
3682 if (!ret) {
3683 if (colon)
3684 *colon = ':';
3685 if (copy_to_user(arg, &ifr,
3686 sizeof(struct ifreq)))
3687 ret = -EFAULT;
3688 }
3689 return ret;
3690
3691 /*
3692 * These ioctl calls:
3693 * - require superuser power.
3694 * - require strict serialization.
3695 * - return a value
3696 */
3697 case SIOCGMIIPHY:
3698 case SIOCGMIIREG:
3699 case SIOCSIFNAME:
3700 if (!capable(CAP_NET_ADMIN))
3701 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003702 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003704 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705 rtnl_unlock();
3706 if (!ret) {
3707 if (colon)
3708 *colon = ':';
3709 if (copy_to_user(arg, &ifr,
3710 sizeof(struct ifreq)))
3711 ret = -EFAULT;
3712 }
3713 return ret;
3714
3715 /*
3716 * These ioctl calls:
3717 * - require superuser power.
3718 * - require strict serialization.
3719 * - do not return a value
3720 */
3721 case SIOCSIFFLAGS:
3722 case SIOCSIFMETRIC:
3723 case SIOCSIFMTU:
3724 case SIOCSIFMAP:
3725 case SIOCSIFHWADDR:
3726 case SIOCSIFSLAVE:
3727 case SIOCADDMULTI:
3728 case SIOCDELMULTI:
3729 case SIOCSIFHWBROADCAST:
3730 case SIOCSIFTXQLEN:
3731 case SIOCSMIIREG:
3732 case SIOCBONDENSLAVE:
3733 case SIOCBONDRELEASE:
3734 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735 case SIOCBONDCHANGEACTIVE:
3736 case SIOCBRADDIF:
3737 case SIOCBRDELIF:
3738 if (!capable(CAP_NET_ADMIN))
3739 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08003740 /* fall through */
3741 case SIOCBONDSLAVEINFOQUERY:
3742 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003743 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003745 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003746 rtnl_unlock();
3747 return ret;
3748
3749 case SIOCGIFMEM:
3750 /* Get the per device memory space. We can add this but
3751 * currently do not support it */
3752 case SIOCSIFMEM:
3753 /* Set the per device memory buffer space.
3754 * Not applicable in our case */
3755 case SIOCSIFLINK:
3756 return -EINVAL;
3757
3758 /*
3759 * Unknown or private ioctl.
3760 */
3761 default:
3762 if (cmd == SIOCWANDEV ||
3763 (cmd >= SIOCDEVPRIVATE &&
3764 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003765 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003766 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003767 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768 rtnl_unlock();
3769 if (!ret && copy_to_user(arg, &ifr,
3770 sizeof(struct ifreq)))
3771 ret = -EFAULT;
3772 return ret;
3773 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003774 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07003775 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003776 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777 return -EINVAL;
3778 }
3779}
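/*
 * Illustrative sketch (editorial addition): the user space side of this
 * interface.  A program reaches dev_ioctl() through an ordinary socket
 * ioctl; the kernel copies the struct ifreq in, dispatches on the command,
 * and copies the result back.  The interface name "eth0" is only an
 * example.  This is standalone userspace C, not part of dev.c.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)           /* handled by dev_ifsioc_locked() */
                printf("mtu %d\n", ifr.ifr_mtu);
        if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
                printf("flags 0x%x\n", ifr.ifr_flags);

        close(fd);
        return 0;
}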
3780
3781
3782/**
3783 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003784 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003785 *
3786 * Returns a suitable unique value for a new device interface
3787 * number. The caller must hold the rtnl semaphore or the
3788 * dev_base_lock to be sure it remains unique.
3789 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003790static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791{
3792 static int ifindex;
3793 for (;;) {
3794 if (++ifindex <= 0)
3795 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003796 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797 return ifindex;
3798 }
3799}
3800
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801/* Delayed registration/unregistration */
3802static DEFINE_SPINLOCK(net_todo_list_lock);
Denis Cheng3b5b34f2007-12-07 00:49:17 -08003803static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003804
Stephen Hemminger6f05f622007-03-08 20:46:03 -08003805static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806{
3807 spin_lock(&net_todo_list_lock);
3808 list_add_tail(&dev->todo_list, &net_todo_list);
3809 spin_unlock(&net_todo_list_lock);
3810}
3811
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003812static void rollback_registered(struct net_device *dev)
3813{
3814 BUG_ON(dev_boot_phase);
3815 ASSERT_RTNL();
3816
3817 /* Some devices call without registering for initialization unwind. */
3818 if (dev->reg_state == NETREG_UNINITIALIZED) {
3819 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3820 "was registered\n", dev->name, dev);
3821
3822 WARN_ON(1);
3823 return;
3824 }
3825
3826 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3827
3828 /* If device is running, close it first. */
3829 dev_close(dev);
3830
3831 /* And unlink it from device chain. */
3832 unlist_netdevice(dev);
3833
3834 dev->reg_state = NETREG_UNREGISTERING;
3835
3836 synchronize_net();
3837
3838 /* Shutdown queueing discipline. */
3839 dev_shutdown(dev);
3840
3841
3842 /* Notify protocols, that we are about to destroy
3843 this device. They should clean all the things.
3844 */
3845 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3846
3847 /*
3848 * Flush the unicast and multicast chains
3849 */
3850 dev_addr_discard(dev);
3851
3852 if (dev->uninit)
3853 dev->uninit(dev);
3854
3855 /* Notifier chain MUST detach us from master device. */
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003856 WARN_ON(dev->master);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003857
3858 /* Remove entries from kobject tree */
3859 netdev_unregister_kobject(dev);
3860
3861 synchronize_net();
3862
3863 dev_put(dev);
3864}
3865
David S. Millere8a04642008-07-17 00:34:19 -07003866static void __netdev_init_queue_locks_one(struct net_device *dev,
3867 struct netdev_queue *dev_queue,
3868 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07003869{
3870 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07003871 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07003872 dev_queue->xmit_lock_owner = -1;
3873}
3874
3875static void netdev_init_queue_locks(struct net_device *dev)
3876{
David S. Millere8a04642008-07-17 00:34:19 -07003877 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3878 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07003879}
3880
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881/**
3882 * register_netdevice - register a network device
3883 * @dev: device to register
3884 *
3885 * Take a completed network device structure and add it to the kernel
3886 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3887 * chain. 0 is returned on success. A negative errno code is returned
3888 * on a failure to set up the device, or if the name is a duplicate.
3889 *
3890 * Callers must hold the rtnl semaphore. You may want
3891 * register_netdev() instead of this.
3892 *
3893 * BUGS:
3894 * The locking appears insufficient to guarantee two parallel registers
3895 * will not get the same name.
3896 */
3897
3898int register_netdevice(struct net_device *dev)
3899{
3900 struct hlist_head *head;
3901 struct hlist_node *p;
3902 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003903 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904
3905 BUG_ON(dev_boot_phase);
3906 ASSERT_RTNL();
3907
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003908 might_sleep();
3909
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910 /* When net_device's are persistent, this will be fatal. */
3911 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003912 BUG_ON(!dev_net(dev));
3913 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003914
David S. Millerf1f28aa2008-07-15 00:08:33 -07003915 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07003916 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07003917 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003918
Linus Torvalds1da177e2005-04-16 15:20:36 -07003919 dev->iflink = -1;
3920
3921 /* Init, if this function is available */
3922 if (dev->init) {
3923 ret = dev->init(dev);
3924 if (ret) {
3925 if (ret > 0)
3926 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08003927 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928 }
3929 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003930
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931 if (!dev_valid_name(dev->name)) {
3932 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003933 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934 }
3935
Eric W. Biederman881d9662007-09-17 11:56:21 -07003936 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937 if (dev->iflink == -1)
3938 dev->iflink = dev->ifindex;
3939
3940 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003941 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942 hlist_for_each(p, head) {
3943 struct net_device *d
3944 = hlist_entry(p, struct net_device, name_hlist);
3945 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3946 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003947 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003949 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950
Stephen Hemmingerd212f872007-06-27 00:47:37 -07003951 /* Fix illegal checksum combinations */
3952 if ((dev->features & NETIF_F_HW_CSUM) &&
3953 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3954 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3955 dev->name);
3956 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3957 }
3958
3959 if ((dev->features & NETIF_F_NO_CSUM) &&
3960 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3961 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3962 dev->name);
3963 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3964 }
3965
3966
Linus Torvalds1da177e2005-04-16 15:20:36 -07003967 /* Fix illegal SG+CSUM combinations. */
3968 if ((dev->features & NETIF_F_SG) &&
Herbert Xu8648b302006-06-17 22:06:05 -07003969 !(dev->features & NETIF_F_ALL_CSUM)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003970 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971 dev->name);
3972 dev->features &= ~NETIF_F_SG;
3973 }
3974
3975 /* TSO requires that SG is present as well. */
3976 if ((dev->features & NETIF_F_TSO) &&
3977 !(dev->features & NETIF_F_SG)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003978 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979 dev->name);
3980 dev->features &= ~NETIF_F_TSO;
3981 }
Ananda Rajue89e9cf2005-10-18 15:46:41 -07003982 if (dev->features & NETIF_F_UFO) {
3983 if (!(dev->features & NETIF_F_HW_CSUM)) {
3984 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3985 "NETIF_F_HW_CSUM feature.\n",
3986 dev->name);
3987 dev->features &= ~NETIF_F_UFO;
3988 }
3989 if (!(dev->features & NETIF_F_SG)) {
3990 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3991 "NETIF_F_SG feature.\n",
3992 dev->name);
3993 dev->features &= ~NETIF_F_UFO;
3994 }
3995 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003996
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07003997 /* Enable software GSO if SG is supported. */
3998 if (dev->features & NETIF_F_SG)
3999 dev->features |= NETIF_F_GSO;
4000
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004001 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004002 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004003 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004004 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004005 dev->reg_state = NETREG_REGISTERED;
4006
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 /*
4008 * Default initial state at registry is that the
4009 * device is present.
4010 */
4011
4012 set_bit(__LINK_STATE_PRESENT, &dev->state);
4013
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004016 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017
4018 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004019 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004020 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004021 if (ret) {
4022 rollback_registered(dev);
4023 dev->reg_state = NETREG_UNREGISTERED;
4024 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025
4026out:
4027 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004028
4029err_uninit:
4030 if (dev->uninit)
4031 dev->uninit(dev);
4032 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033}
4034
4035/**
4036 * register_netdev - register a network device
4037 * @dev: device to register
4038 *
4039 * Take a completed network device structure and add it to the kernel
4040 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4041 * chain. 0 is returned on success. A negative errno code is returned
4042 * on a failure to set up the device, or if the name is a duplicate.
4043 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004044 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004045 * and expands the device name if you passed a format string to
4046 * alloc_netdev.
4047 */
4048int register_netdev(struct net_device *dev)
4049{
4050 int err;
4051
4052 rtnl_lock();
4053
4054 /*
4055 * If the name is a format string the caller wants us to do a
4056 * name allocation.
4057 */
4058 if (strchr(dev->name, '%')) {
4059 err = dev_alloc_name(dev, dev->name);
4060 if (err < 0)
4061 goto out;
4062 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004063
Linus Torvalds1da177e2005-04-16 15:20:36 -07004064 err = register_netdevice(dev);
4065out:
4066 rtnl_unlock();
4067 return err;
4068}
4069EXPORT_SYMBOL(register_netdev);
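/*
 * Illustrative sketch (editorial addition): the usual driver-side life
 * cycle around register_netdev().  "foo_priv", "foo_init" and "foo_remove"
 * are hypothetical; ether_setup() comes from <linux/etherdevice.h>.  The
 * "%d" in the name lets the core pick "foo0", "foo1", ... via
 * dev_alloc_name() as described above.
 */
struct foo_priv {
        int dummy;
};

static int __init foo_init(void)
{
        struct net_device *dev;
        int err;

        dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", ether_setup);
        if (!dev)
                return -ENOMEM;

        err = register_netdev(dev);     /* takes rtnl, expands "foo%d" */
        if (err) {
                free_netdev(dev);
                return err;
        }
        return 0;
}

static void foo_remove(struct net_device *dev)
{
        unregister_netdev(dev);         /* takes rtnl, queues the todo work */
        free_netdev(dev);
}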
4070
4071/*
4072 * netdev_wait_allrefs - wait until all references are gone.
4073 *
4074 * This is called when unregistering network devices.
4075 *
4076 * Any protocol or device that holds a reference should register
4077 * for netdevice notification, and cleanup and put back the
4078 * reference if they receive an UNREGISTER event.
4079 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004080 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081 */
4082static void netdev_wait_allrefs(struct net_device *dev)
4083{
4084 unsigned long rebroadcast_time, warning_time;
4085
4086 rebroadcast_time = warning_time = jiffies;
4087 while (atomic_read(&dev->refcnt) != 0) {
4088 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004089 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090
4091 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004092 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093
4094 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4095 &dev->state)) {
4096 /* We must not have linkwatch events
4097 * pending on unregister. If this
4098 * happens, we simply run the queue
4099 * unscheduled, resulting in a noop
4100 * for this device.
4101 */
4102 linkwatch_run_queue();
4103 }
4104
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004105 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106
4107 rebroadcast_time = jiffies;
4108 }
4109
4110 msleep(250);
4111
4112 if (time_after(jiffies, warning_time + 10 * HZ)) {
4113 printk(KERN_EMERG "unregister_netdevice: "
4114 "waiting for %s to become free. Usage "
4115 "count = %d\n",
4116 dev->name, atomic_read(&dev->refcnt));
4117 warning_time = jiffies;
4118 }
4119 }
4120}
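/*
 * Illustrative sketch (editorial addition): the pattern the comment above
 * asks for.  A subsystem that keeps a long-lived reference (dev_hold())
 * registers a netdevice notifier, e.g. with
 * register_netdevice_notifier(&example_notifier), and drops that reference
 * when it sees NETDEV_UNREGISTER so netdev_wait_allrefs() can finish.
 * "example_cache" stands for whatever private state holds the pointer.
 */
static struct net_device *example_cache;

static int example_netdev_event(struct notifier_block *this,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (event == NETDEV_UNREGISTER && dev == example_cache) {
                example_cache = NULL;
                dev_put(dev);           /* give the reference back */
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
        .notifier_call = example_netdev_event,
};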
4121
4122/* The sequence is:
4123 *
4124 * rtnl_lock();
4125 * ...
4126 * register_netdevice(x1);
4127 * register_netdevice(x2);
4128 * ...
4129 * unregister_netdevice(y1);
4130 * unregister_netdevice(y2);
4131 * ...
4132 * rtnl_unlock();
4133 * free_netdev(y1);
4134 * free_netdev(y2);
4135 *
4136 * We are invoked by rtnl_unlock() after it drops the semaphore.
4137 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004138 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07004139 * without deadlocking with linkwatch via keventd.
4140 * 2) Since we run with the RTNL semaphore not held, we can sleep
4141 * safely in order to wait for the netdev refcnt to drop to zero.
4142 */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004143static DEFINE_MUTEX(net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144void netdev_run_todo(void)
4145{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004146 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147
4148 /* Need to guard against multiple cpu's getting out of order. */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004149 mutex_lock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150
4151 /* Not safe to do outside the semaphore. We must not return
4152 * until all unregister events invoked by the local processor
4153 * have been completed (either by this todo run, or one on
4154 * another cpu).
4155 */
4156 if (list_empty(&net_todo_list))
4157 goto out;
4158
4159 /* Snapshot list, allow later requests */
4160 spin_lock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004161 list_replace_init(&net_todo_list, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162 spin_unlock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004163
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164 while (!list_empty(&list)) {
4165 struct net_device *dev
4166 = list_entry(list.next, struct net_device, todo_list);
4167 list_del(&dev->todo_list);
4168
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004169 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 printk(KERN_ERR "network todo '%s' but state %d\n",
4171 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004172 dump_stack();
4173 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004175
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004176 dev->reg_state = NETREG_UNREGISTERED;
4177
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004178 on_each_cpu(flush_backlog, dev, 1);
4179
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004180 netdev_wait_allrefs(dev);
4181
4182 /* paranoia */
4183 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004184 WARN_ON(dev->ip_ptr);
4185 WARN_ON(dev->ip6_ptr);
4186 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004187
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004188 if (dev->destructor)
4189 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07004190
4191 /* Free network device */
4192 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193 }
4194
4195out:
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004196 mutex_unlock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197}
4198
Rusty Russell5a1b5892007-04-28 21:04:03 -07004199static struct net_device_stats *internal_stats(struct net_device *dev)
Rusty Russellc45d2862007-03-28 14:29:08 -07004200{
Rusty Russell5a1b5892007-04-28 21:04:03 -07004201 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07004202}
4203
David S. Millerdc2b4842008-07-08 17:18:23 -07004204static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07004205 struct netdev_queue *queue,
4206 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07004207{
David S. Millerdc2b4842008-07-08 17:18:23 -07004208 queue->dev = dev;
4209}
4210
David S. Millerbb949fb2008-07-08 16:55:56 -07004211static void netdev_init_queues(struct net_device *dev)
4212{
David S. Millere8a04642008-07-17 00:34:19 -07004213 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4214 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07004215 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07004216}
4217
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004219 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220 * @sizeof_priv: size of private data to allocate space for
4221 * @name: device name format string
4222 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004223 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224 *
4225 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004226 * and performs basic initialization. Also allocates subqueue structs
4227 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004229struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4230 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231{
David S. Millere8a04642008-07-17 00:34:19 -07004232 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07004234 size_t alloc_size;
David S. Millere8a04642008-07-17 00:34:19 -07004235 void *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004237 BUG_ON(strlen(name) >= sizeof(dev->name));
4238
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004239 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07004240 if (sizeof_priv) {
4241 /* ensure 32-byte alignment of private area */
4242 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4243 alloc_size += sizeof_priv;
4244 }
4245 /* ensure 32-byte alignment of whole construct */
4246 alloc_size += NETDEV_ALIGN_CONST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07004248 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004250 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251 return NULL;
4252 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253
Stephen Hemminger79439862008-07-21 13:28:44 -07004254 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07004255 if (!tx) {
4256 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4257 "tx qdiscs.\n");
4258 kfree(p);
4259 return NULL;
4260 }
4261
Linus Torvalds1da177e2005-04-16 15:20:36 -07004262 dev = (struct net_device *)
4263 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4264 dev->padded = (char *)dev - (char *)p;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004265 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266
David S. Millere8a04642008-07-17 00:34:19 -07004267 dev->_tx = tx;
4268 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004269 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07004270
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004271 if (sizeof_priv) {
4272 dev->priv = ((char *)dev +
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004273 ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004274 & ~NETDEV_ALIGN_CONST));
4275 }
4276
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07004277 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004278
David S. Millerbb949fb2008-07-08 16:55:56 -07004279 netdev_init_queues(dev);
4280
Rusty Russell5a1b5892007-04-28 21:04:03 -07004281 dev->get_stats = internal_stats;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004282 netpoll_netdev_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283 setup(dev);
4284 strcpy(dev->name, name);
4285 return dev;
4286}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004287EXPORT_SYMBOL(alloc_netdev_mq);
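/*
 * Illustrative sketch (editorial addition): allocating a multiqueue device.
 * The private area requested here lands behind the aligned struct
 * net_device and is reachable through netdev_priv(); "bar_priv" and the
 * queue count of 4 are arbitrary example values, and ether_setup() comes
 * from <linux/etherdevice.h>.
 */
struct bar_priv {
        unsigned long stats;
};

static struct net_device *bar_alloc(void)
{
        struct net_device *dev;
        struct bar_priv *priv;

        dev = alloc_netdev_mq(sizeof(struct bar_priv), "bar%d",
                              ether_setup, 4);  /* 4 TX queues */
        if (!dev)
                return NULL;

        priv = netdev_priv(dev);
        priv->stats = 0;
        return dev;
}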
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288
4289/**
4290 * free_netdev - free network device
4291 * @dev: device
4292 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004293 * This function does the last stage of destroying an allocated device
4294 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295 * If this is the last reference then it will be freed.
4296 */
4297void free_netdev(struct net_device *dev)
4298{
Denis V. Lunevf3005d72008-04-16 02:02:18 -07004299 release_net(dev_net(dev));
4300
David S. Millere8a04642008-07-17 00:34:19 -07004301 kfree(dev->_tx);
4302
Stephen Hemminger3041a062006-05-26 13:25:24 -07004303 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304 if (dev->reg_state == NETREG_UNINITIALIZED) {
4305 kfree((char *)dev - dev->padded);
4306 return;
4307 }
4308
4309 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4310 dev->reg_state = NETREG_RELEASED;
4311
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07004312 /* will free via device release */
4313 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314}
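
/*
 * Error-handling sketch (assumed caller code, not part of this file):
 * if registration fails, the device is still NETREG_UNINITIALIZED and
 * free_netdev() simply kfree()s the allocation:
 *
 *	err = register_netdevice(ndev);
 *	if (err) {
 *		free_netdev(ndev);
 *		return err;
 *	}
 */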
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004315
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316/* Synchronize with packet receive processing. */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004317void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318{
4319 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07004320 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321}
4322
4323/**
4324 * unregister_netdevice - remove device from the kernel
4325 * @dev: device
4326 *
4327 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004328 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329 *
4330 * Callers must hold the rtnl semaphore. You may want
4331 * unregister_netdev() instead of this.
4332 */
4333
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08004334void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335{
Herbert Xua6620712007-12-12 19:21:56 -08004336 ASSERT_RTNL();
4337
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004338 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339 /* Finish processing unregister after unlock */
4340 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004341}
4342
4343/**
4344 * unregister_netdev - remove device from the kernel
4345 * @dev: device
4346 *
4347 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004348 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004349 *
4350 * This is just a wrapper for unregister_netdevice that takes
4351 * the rtnl semaphore. In general you want to use this and not
4352 * unregister_netdevice.
4353 */
4354void unregister_netdev(struct net_device *dev)
4355{
4356 rtnl_lock();
4357 unregister_netdevice(dev);
4358 rtnl_unlock();
4359}
4360
4361EXPORT_SYMBOL(unregister_netdev);
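
/*
 * Teardown sketch (assumed driver code, not part of this file): module
 * unload paths conventionally pair unregister_netdev() with free_netdev():
 *
 *	unregister_netdev(ndev);
 *	free_netdev(ndev);
 */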
4362
Eric W. Biedermance286d32007-09-12 13:53:49 +02004363/**
4364 * dev_change_net_namespace - move device to a different network namespace
4365 * @dev: device
4366 * @net: network namespace
4367 * @pat: If not NULL, name pattern to try if the current device name
4368 * is already taken in the destination network namespace.
4369 *
4370 * This function shuts down a device interface and moves it
4371 * to a new network namespace. On success 0 is returned, on
4372 * a failure a negative errno code is returned.
4373 *
4374 * Callers must hold the rtnl semaphore.
4375 */
4376
4377int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4378{
4379 char buf[IFNAMSIZ];
4380 const char *destname;
4381 int err;
4382
4383 ASSERT_RTNL();
4384
4385 /* Don't allow namespace local devices to be moved. */
4386 err = -EINVAL;
4387 if (dev->features & NETIF_F_NETNS_LOCAL)
4388 goto out;
4389
4390	/* Ensure the device has been registered */
4391 err = -EINVAL;
4392 if (dev->reg_state != NETREG_REGISTERED)
4393 goto out;
4394
4395	/* Get out if there is nothing to do */
4396 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09004397 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02004398 goto out;
4399
4400 /* Pick the destination device name, and ensure
4401 * we can use it in the destination network namespace.
4402 */
4403 err = -EEXIST;
4404 destname = dev->name;
4405 if (__dev_get_by_name(net, destname)) {
4406 /* We get here if we can't use the current device name */
4407 if (!pat)
4408 goto out;
4409 if (!dev_valid_name(pat))
4410 goto out;
4411 if (strchr(pat, '%')) {
4412 if (__dev_alloc_name(net, pat, buf) < 0)
4413 goto out;
4414 destname = buf;
4415 } else
4416 destname = pat;
4417 if (__dev_get_by_name(net, destname))
4418 goto out;
4419 }
4420
4421 /*
4422	 * And now a mini version of register_netdevice() and unregister_netdevice().
4423 */
4424
4425 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07004426 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004427
4428 /* And unlink it from device chain */
4429 err = -ENODEV;
4430 unlist_netdevice(dev);
4431
4432 synchronize_net();
4433
4434 /* Shutdown queueing discipline. */
4435 dev_shutdown(dev);
4436
4437	/* Notify protocols that we are about to destroy
4438	   this device. They should clean up all of their state.
4439 */
4440 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4441
4442 /*
4443 * Flush the unicast and multicast chains
4444 */
4445 dev_addr_discard(dev);
4446
4447 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004448 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004449
4450 /* Assign the new device name */
4451 if (destname != dev->name)
4452 strcpy(dev->name, destname);
4453
4454	/* If there is an ifindex conflict, assign a new one */
4455 if (__dev_get_by_index(net, dev->ifindex)) {
4456 int iflink = (dev->iflink == dev->ifindex);
4457 dev->ifindex = dev_new_index(net);
4458 if (iflink)
4459 dev->iflink = dev->ifindex;
4460 }
4461
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004462 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004463 netdev_unregister_kobject(dev);
4464 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004465 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004466
4467 /* Add the device back in the hashes */
4468 list_netdevice(dev);
4469
4470	/* Notify protocols that a new device has appeared. */
4471 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4472
4473 synchronize_net();
4474 err = 0;
4475out:
4476 return err;
4477}
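
/*
 * Usage sketch (assumed caller code, not part of this file): the move is
 * done under RTNL, optionally with a fallback name pattern for the target
 * namespace:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	rtnl_unlock();
 */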
4478
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479static int dev_cpu_callback(struct notifier_block *nfb,
4480 unsigned long action,
4481 void *ocpu)
4482{
4483 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07004484 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485 struct sk_buff *skb;
4486 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4487 struct softnet_data *sd, *oldsd;
4488
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07004489 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490 return NOTIFY_OK;
4491
4492 local_irq_disable();
4493 cpu = smp_processor_id();
4494 sd = &per_cpu(softnet_data, cpu);
4495 oldsd = &per_cpu(softnet_data, oldcpu);
4496
4497 /* Find end of our completion_queue. */
4498 list_skb = &sd->completion_queue;
4499 while (*list_skb)
4500 list_skb = &(*list_skb)->next;
4501 /* Append completion queue from offline CPU. */
4502 *list_skb = oldsd->completion_queue;
4503 oldsd->completion_queue = NULL;
4504
4505 /* Find end of our output_queue. */
4506 list_net = &sd->output_queue;
4507 while (*list_net)
4508 list_net = &(*list_net)->next_sched;
4509 /* Append output queue from offline CPU. */
4510 *list_net = oldsd->output_queue;
4511 oldsd->output_queue = NULL;
4512
4513 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4514 local_irq_enable();
4515
4516 /* Process offline CPU's input_pkt_queue */
4517 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4518 netif_rx(skb);
4519
4520 return NOTIFY_OK;
4521}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004522
Chris Leechdb217332006-06-17 21:24:58 -07004523#ifdef CONFIG_NET_DMA
4524/**
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004525 * net_dma_rebalance - try to maintain one DMA channel per CPU
4526 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4527 *
4528 * This is called when the number of channels allocated to the net_dma client
4529 * changes. The net_dma client tries to have one DMA channel per CPU.
Chris Leechdb217332006-06-17 21:24:58 -07004530 */
Dan Williamsd379b012007-07-09 11:56:42 -07004531
4532static void net_dma_rebalance(struct net_dma *net_dma)
Chris Leechdb217332006-06-17 21:24:58 -07004533{
Dan Williamsd379b012007-07-09 11:56:42 -07004534 unsigned int cpu, i, n, chan_idx;
Chris Leechdb217332006-06-17 21:24:58 -07004535 struct dma_chan *chan;
4536
Dan Williamsd379b012007-07-09 11:56:42 -07004537 if (cpus_empty(net_dma->channel_mask)) {
Chris Leechdb217332006-06-17 21:24:58 -07004538 for_each_online_cpu(cpu)
Alexey Dobriyan29bbd722006-08-02 15:02:31 -07004539 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
Chris Leechdb217332006-06-17 21:24:58 -07004540 return;
4541 }
4542
4543 i = 0;
4544 cpu = first_cpu(cpu_online_map);
4545
Mike Travis0e12f842008-05-12 21:21:13 +02004546 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
Dan Williamsd379b012007-07-09 11:56:42 -07004547 chan = net_dma->channels[chan_idx];
4548
4549 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4550 + (i < (num_online_cpus() %
4551 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
Chris Leechdb217332006-06-17 21:24:58 -07004552
4553 while(n) {
Alexey Dobriyan29bbd722006-08-02 15:02:31 -07004554 per_cpu(softnet_data, cpu).net_dma = chan;
Chris Leechdb217332006-06-17 21:24:58 -07004555 cpu = next_cpu(cpu, cpu_online_map);
4556 n--;
4557 }
4558 i++;
4559 }
Chris Leechdb217332006-06-17 21:24:58 -07004560}
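
/*
 * Distribution example (illustrative only): with 8 online CPUs and 3
 * channels in ->channel_mask, the loop above gives every channel
 * 8 / 3 = 2 CPUs plus one extra CPU for the first 8 % 3 = 2 channels,
 * i.e. shares of 3, 3 and 2.
 */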
4561
4562/**
4563 * netdev_dma_event - event callback for the net_dma_client
4564 * @client: should always be net_dma_client
Randy Dunlapf4b8ea72006-06-22 16:00:11 -07004565 * @chan: DMA channel for the event
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004566 * @state: DMA state to be handled
Chris Leechdb217332006-06-17 21:24:58 -07004567 */
Dan Williamsd379b012007-07-09 11:56:42 -07004568static enum dma_state_client
4569netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4570 enum dma_state state)
Chris Leechdb217332006-06-17 21:24:58 -07004571{
Dan Williamsd379b012007-07-09 11:56:42 -07004572 int i, found = 0, pos = -1;
4573 struct net_dma *net_dma =
4574 container_of(client, struct net_dma, client);
4575 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4576
4577 spin_lock(&net_dma->lock);
4578 switch (state) {
4579 case DMA_RESOURCE_AVAILABLE:
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004580 for (i = 0; i < nr_cpu_ids; i++)
Dan Williamsd379b012007-07-09 11:56:42 -07004581 if (net_dma->channels[i] == chan) {
4582 found = 1;
4583 break;
4584 } else if (net_dma->channels[i] == NULL && pos < 0)
4585 pos = i;
4586
4587 if (!found && pos >= 0) {
4588 ack = DMA_ACK;
4589 net_dma->channels[pos] = chan;
4590 cpu_set(pos, net_dma->channel_mask);
4591 net_dma_rebalance(net_dma);
4592 }
Chris Leechdb217332006-06-17 21:24:58 -07004593 break;
4594 case DMA_RESOURCE_REMOVED:
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004595 for (i = 0; i < nr_cpu_ids; i++)
Dan Williamsd379b012007-07-09 11:56:42 -07004596 if (net_dma->channels[i] == chan) {
4597 found = 1;
4598 pos = i;
4599 break;
4600 }
4601
4602 if (found) {
4603 ack = DMA_ACK;
4604 cpu_clear(pos, net_dma->channel_mask);
4605			net_dma->channels[pos] = NULL;
4606 net_dma_rebalance(net_dma);
4607 }
Chris Leechdb217332006-06-17 21:24:58 -07004608 break;
4609 default:
4610 break;
4611 }
Dan Williamsd379b012007-07-09 11:56:42 -07004612 spin_unlock(&net_dma->lock);
4613
4614 return ack;
Chris Leechdb217332006-06-17 21:24:58 -07004615}
4616
4617/**
4618 * netdev_dma_register - register the networking subsystem as a DMA client
4619 */
4620static int __init netdev_dma_register(void)
4621{
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004622	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(*net_dma.channels),
4623 GFP_KERNEL);
4624 if (unlikely(!net_dma.channels)) {
4625 printk(KERN_NOTICE
4626 "netdev_dma: no memory for net_dma.channels\n");
4627 return -ENOMEM;
4628 }
Dan Williamsd379b012007-07-09 11:56:42 -07004629 spin_lock_init(&net_dma.lock);
4630 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4631 dma_async_client_register(&net_dma.client);
4632 dma_async_client_chan_request(&net_dma.client);
Chris Leechdb217332006-06-17 21:24:58 -07004633 return 0;
4634}
4635
4636#else
4637static int __init netdev_dma_register(void) { return -ENODEV; }
4638#endif /* CONFIG_NET_DMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004639
Herbert Xu7f353bf2007-08-10 15:47:58 -07004640/**
4641 * netdev_compute_features - compute conjunction of two feature sets
4642 * @all: first feature set
4643 * @one: second feature set
4644 *
4645 * Computes a new feature set after adding a device with feature set
4646 * @one to the master device with current feature set @all. Returns
4647 * the new feature set.
4648 */
4649int netdev_compute_features(unsigned long all, unsigned long one)
4650{
4651 /* if device needs checksumming, downgrade to hw checksumming */
4652 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4653 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4654
4655 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4656 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4657 all ^= NETIF_F_HW_CSUM
4658 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4659
4660 if (one & NETIF_F_GSO)
4661 one |= NETIF_F_GSO_SOFTWARE;
4662 one |= NETIF_F_GSO;
4663
4664 /* If even one device supports robust GSO, enable it for all. */
4665 if (one & NETIF_F_GSO_ROBUST)
4666 all |= NETIF_F_GSO_ROBUST;
4667
4668 all &= one | NETIF_F_LLTX;
4669
4670 if (!(all & NETIF_F_ALL_CSUM))
4671 all &= ~NETIF_F_SG;
4672 if (!(all & NETIF_F_SG))
4673 all &= ~NETIF_F_GSO_MASK;
4674
4675 return all;
4676}
4677EXPORT_SYMBOL(netdev_compute_features);
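
/*
 * Worked example (illustrative only): a master currently advertising
 * NETIF_F_SG | NETIF_F_HW_CSUM that gains a slave advertising
 * NETIF_F_SG | NETIF_F_IP_CSUM ends up with NETIF_F_SG | NETIF_F_IP_CSUM:
 * hardware checksumming is downgraded to the protocol checksums and the
 * result is masked by what the slave can actually do.
 */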
4678
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004679static struct hlist_head *netdev_create_hash(void)
4680{
4681 int i;
4682 struct hlist_head *hash;
4683
4684 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4685 if (hash != NULL)
4686 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4687 INIT_HLIST_HEAD(&hash[i]);
4688
4689 return hash;
4690}
4691
Eric W. Biederman881d9662007-09-17 11:56:21 -07004692/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07004693static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004694{
Eric W. Biederman881d9662007-09-17 11:56:21 -07004695 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07004696
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004697 net->dev_name_head = netdev_create_hash();
4698 if (net->dev_name_head == NULL)
4699 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004700
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004701 net->dev_index_head = netdev_create_hash();
4702 if (net->dev_index_head == NULL)
4703 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004704
4705 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004706
4707err_idx:
4708 kfree(net->dev_name_head);
4709err_name:
4710 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004711}
4712
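/*
 * netdev_drivername - copy the name of the driver bound to @dev's parent
 * @dev: network device
 * @buffer: destination buffer
 * @len: size of @buffer
 *
 * Returns @buffer; it is left empty if the device has no parent or no
 * driver name is available.
 */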
Arjan van de Ven6579e572008-07-21 13:31:48 -07004713char *netdev_drivername(struct net_device *dev, char *buffer, int len)
4714{
4715 struct device_driver *driver;
4716 struct device *parent;
4717
4718 if (len <= 0 || !buffer)
4719 return buffer;
4720 buffer[0] = 0;
4721
4722 parent = dev->dev.parent;
4723
4724 if (!parent)
4725 return buffer;
4726
4727 driver = parent->driver;
4728 if (driver && driver->name)
4729 strlcpy(buffer, driver->name, len);
4730 return buffer;
4731}
4732
Pavel Emelyanov46650792007-10-08 20:38:39 -07004733static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004734{
4735 kfree(net->dev_name_head);
4736 kfree(net->dev_index_head);
4737}
4738
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004739static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004740 .init = netdev_init,
4741 .exit = netdev_exit,
4742};
4743
Pavel Emelyanov46650792007-10-08 20:38:39 -07004744static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02004745{
4746 struct net_device *dev, *next;
4747 /*
4748	 * Push all migratable network devices back to the
4749 * initial network namespace
4750 */
4751 rtnl_lock();
4752 for_each_netdev_safe(net, dev, next) {
4753 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004754 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02004755
4756		/* Ignore unmovable devices (e.g. loopback) */
4757 if (dev->features & NETIF_F_NETNS_LOCAL)
4758 continue;
4759
4760		/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004761 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
4762 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004763 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004764 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02004765 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004766 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02004767 }
4768 }
4769 rtnl_unlock();
4770}
4771
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004772static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02004773 .exit = default_device_exit,
4774};
4775
Linus Torvalds1da177e2005-04-16 15:20:36 -07004776/*
4777 * Initialize the DEV module. At boot time this walks the device list and
4778 * unhooks any devices that fail to initialise (normally hardware not
4779 * present) and leaves us with a valid list of present and active devices.
4780 *
4781 */
4782
4783/*
4784 * This is called single threaded during boot, so no need
4785 * This is called single-threaded during boot, so no need
4786 */
4787static int __init net_dev_init(void)
4788{
4789 int i, rc = -ENOMEM;
4790
4791 BUG_ON(!dev_boot_phase);
4792
Linus Torvalds1da177e2005-04-16 15:20:36 -07004793 if (dev_proc_init())
4794 goto out;
4795
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004796 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07004797 goto out;
4798
4799 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004800 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801 INIT_LIST_HEAD(&ptype_base[i]);
4802
Eric W. Biederman881d9662007-09-17 11:56:21 -07004803 if (register_pernet_subsys(&netdev_net_ops))
4804 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004805
Eric W. Biedermance286d32007-09-12 13:53:49 +02004806 if (register_pernet_device(&default_device_ops))
4807 goto out;
4808
Linus Torvalds1da177e2005-04-16 15:20:36 -07004809 /*
4810 * Initialise the packet receive queues.
4811 */
4812
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07004813 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004814 struct softnet_data *queue;
4815
4816 queue = &per_cpu(softnet_data, i);
4817 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004818 queue->completion_queue = NULL;
4819 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004820
4821 queue->backlog.poll = process_backlog;
4822 queue->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004823 }
4824
Chris Leechdb217332006-06-17 21:24:58 -07004825 netdev_dma_register();
4826
Linus Torvalds1da177e2005-04-16 15:20:36 -07004827 dev_boot_phase = 0;
4828
Carlos R. Mafra962cf362008-05-15 11:15:37 -03004829 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
4830 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831
4832 hotcpu_notifier(dev_cpu_callback, 0);
4833 dst_init();
4834 dev_mcast_init();
4835 rc = 0;
4836out:
4837 return rc;
4838}
4839
4840subsys_initcall(net_dev_init);
4841
4842EXPORT_SYMBOL(__dev_get_by_index);
4843EXPORT_SYMBOL(__dev_get_by_name);
4844EXPORT_SYMBOL(__dev_remove_pack);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08004845EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004846EXPORT_SYMBOL(dev_add_pack);
4847EXPORT_SYMBOL(dev_alloc_name);
4848EXPORT_SYMBOL(dev_close);
4849EXPORT_SYMBOL(dev_get_by_flags);
4850EXPORT_SYMBOL(dev_get_by_index);
4851EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004852EXPORT_SYMBOL(dev_open);
4853EXPORT_SYMBOL(dev_queue_xmit);
4854EXPORT_SYMBOL(dev_remove_pack);
4855EXPORT_SYMBOL(dev_set_allmulti);
4856EXPORT_SYMBOL(dev_set_promiscuity);
4857EXPORT_SYMBOL(dev_change_flags);
4858EXPORT_SYMBOL(dev_set_mtu);
4859EXPORT_SYMBOL(dev_set_mac_address);
4860EXPORT_SYMBOL(free_netdev);
4861EXPORT_SYMBOL(netdev_boot_setup_check);
4862EXPORT_SYMBOL(netdev_set_master);
4863EXPORT_SYMBOL(netdev_state_change);
4864EXPORT_SYMBOL(netif_receive_skb);
4865EXPORT_SYMBOL(netif_rx);
4866EXPORT_SYMBOL(register_gifconf);
4867EXPORT_SYMBOL(register_netdevice);
4868EXPORT_SYMBOL(register_netdevice_notifier);
4869EXPORT_SYMBOL(skb_checksum_help);
4870EXPORT_SYMBOL(synchronize_net);
4871EXPORT_SYMBOL(unregister_netdevice);
4872EXPORT_SYMBOL(unregister_netdevice_notifier);
4873EXPORT_SYMBOL(net_enable_timestamp);
4874EXPORT_SYMBOL(net_disable_timestamp);
4875EXPORT_SYMBOL(dev_get_flags);
4876
4877#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4878EXPORT_SYMBOL(br_handle_frame_hook);
4879EXPORT_SYMBOL(br_fdb_get_hook);
4880EXPORT_SYMBOL(br_fdb_put_hook);
4881#endif
4882
4883#ifdef CONFIG_KMOD
4884EXPORT_SYMBOL(dev_load);
4885#endif
4886
4887EXPORT_PER_CPU_SYMBOL(softnet_data);