/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 *             --BLG
 *
 *		0800	IP
 *		8100    802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client	client;
	spinlock_t		lock;
	cpumask_t		channel_mask;
	struct dma_chan		**channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

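/*
 * Usage sketch (illustrative only, not part of the original file): a
 * protocol module would typically pair dev_add_pack() at load time with
 * dev_remove_pack() at unload.  The identifiers example_rcv,
 * example_packet_type, example_init and example_exit are hypothetical
 * names used purely for illustration; the block is compiled out.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* ... inspect or consume the packet ... */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_packet_type = {
	.type	= __constant_htons(ETH_P_ALL),	/* or a specific EtherType */
	.func	= example_rcv,
};

static int __init example_init(void)
{
	dev_add_pack(&example_packet_type);
	return 0;
}

static void __exit example_exit(void)
{
	/* dev_remove_pack() sleeps until no CPU can still see the handler */
	dev_remove_pack(&example_packet_type);
}
#endif
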
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq 	= s[i].map.irq;
			dev->base_addr 	= s[i].map.base_addr;
			dev->mem_start 	= s[i].map.mem_start;
			dev->mem_end 	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

/*******************************************************************************

		Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

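/*
 * Usage sketch (illustrative only, not part of the original file):
 * looking a device up by name from process context.  dev_get_by_name()
 * takes a reference that the caller must drop with dev_put().  The
 * function name example_report_device is hypothetical; compiled out.
 */
#if 0
static int example_report_device(struct net *net, const char *ifname)
{
	struct net_device *dev;

	dev = dev_get_by_name(net, ifname);
	if (!dev)
		return -ENODEV;

	printk(KERN_INFO "%s: ifindex %d, mtu %d\n",
	       dev->name, dev->ifindex, dev->mtu);

	dev_put(dev);
	return 0;
}
#endif
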
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

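/*
 * Usage sketch (illustrative only, not part of the original file): one
 * way a caller holding the rtnl lock might request a templated name;
 * dev_alloc_name() fills in the first free unit for the "%d".  The
 * function name example_name_device is hypothetical; compiled out.
 */
#if 0
static int example_name_device(struct net_device *dev)
{
	int ret;

	ASSERT_RTNL();
	ret = dev_alloc_name(dev, "dummy%d");	/* e.g. becomes "dummy0" */
	if (ret < 0)
		return ret;
	return 0;
}
#endif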

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for shutdown while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}


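/*
 * Usage sketch (illustrative only, not part of the original file):
 * administratively cycling an interface from kernel code.  Both
 * dev_open() and dev_close() must run under the rtnl semaphore.  The
 * function name example_cycle_device is hypothetical; compiled out.
 */
#if 0
static int example_cycle_device(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);		/* nop if already IFF_UP */
	if (!err)
		dev_close(dev);		/* take it back down */
	rtnl_unlock();

	return err;
}
#endif
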
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);

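/*
 * Usage sketch (illustrative only, not part of the original file): code
 * that starts forwarding packets off an interface would disable LRO
 * first, under RTNL, since merged super-packets must not be forwarded.
 * The function name example_prepare_forwarding is hypothetical.
 */
#if 0
static void example_prepare_forwarding(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_disable_lro(dev);
}
#endif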

static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

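/*
 * Usage sketch (illustrative only, not part of the original file): a
 * minimal netdevice notifier.  Registration replays NETDEV_REGISTER and
 * NETDEV_UP for devices that already exist, so the callback sees a
 * consistent view.  example_netdev_event and example_netdev_notifier
 * are hypothetical names; the block is compiled out.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		printk(KERN_DEBUG "%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		printk(KERN_DEBUG "%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_notifier); */
#endif
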
1240/**
1241 * unregister_netdevice_notifier - unregister a network notifier block
1242 * @nb: notifier
1243 *
1244 * Unregister a notifier previously registered by
1245 * register_netdevice_notifier(). The notifier is unlinked into the
1246 * kernel structures and may then be reused. A negative errno code
1247 * is returned on a failure.
1248 */
1249
1250int unregister_netdevice_notifier(struct notifier_block *nb)
1251{
Herbert Xu9f514952006-03-25 01:24:25 -08001252 int err;
1253
1254 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001255 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001256 rtnl_unlock();
1257 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258}
1259
1260/**
1261 * call_netdevice_notifiers - call all network notifier blocks
1262 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001263 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 *
1265 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001266 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 */
1268
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001269int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001271 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272}
1273
1274/* When > 0 there are consumers of rx skb time stamps */
1275static atomic_t netstamp_needed = ATOMIC_INIT(0);
1276
1277void net_enable_timestamp(void)
1278{
1279 atomic_inc(&netstamp_needed);
1280}
1281
1282void net_disable_timestamp(void)
1283{
1284 atomic_dec(&netstamp_needed);
1285}
1286
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001287static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288{
1289 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001290 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001291 else
1292 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293}
1294
1295/*
1296 * Support routine. Sends outgoing frames to any network
1297 * taps currently in use.
1298 */
1299
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001300static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301{
1302 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001303
1304 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305
1306 rcu_read_lock();
1307 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1308 /* Never send packets back to the socket
1309 * they originated from - MvS (miquels@drinkel.ow.org)
1310 */
1311 if ((ptype->dev == dev || !ptype->dev) &&
1312 (ptype->af_packet_priv == NULL ||
1313 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1314 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
1315 if (!skb2)
1316 break;
1317
1318 /* skb->nh should be correctly
1319 set by sender, so that the second statement is
1320 just protection against buggy protocols.
1321 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001322 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001324 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001325 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 if (net_ratelimit())
1327 printk(KERN_CRIT "protocol %04x is "
1328 "buggy, dev %s\n",
1329 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001330 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331 }
1332
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001333 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001335 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 }
1337 }
1338 rcu_read_unlock();
1339}
1340
Denis Vlasenko56079432006-03-29 15:57:29 -08001341
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001342static inline void __netif_reschedule(struct Qdisc *q)
1343{
1344 struct softnet_data *sd;
1345 unsigned long flags;
1346
1347 local_irq_save(flags);
1348 sd = &__get_cpu_var(softnet_data);
1349 q->next_sched = sd->output_queue;
1350 sd->output_queue = q;
1351 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1352 local_irq_restore(flags);
1353}
1354
David S. Miller37437bb2008-07-16 02:15:04 -07001355void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001356{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001357 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1358 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001359}
1360EXPORT_SYMBOL(__netif_schedule);
1361
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001362void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001363{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001364 if (atomic_dec_and_test(&skb->users)) {
1365 struct softnet_data *sd;
1366 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001367
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001368 local_irq_save(flags);
1369 sd = &__get_cpu_var(softnet_data);
1370 skb->next = sd->completion_queue;
1371 sd->completion_queue = skb;
1372 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1373 local_irq_restore(flags);
1374 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001375}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001376EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001377
1378void dev_kfree_skb_any(struct sk_buff *skb)
1379{
1380 if (in_irq() || irqs_disabled())
1381 dev_kfree_skb_irq(skb);
1382 else
1383 dev_kfree_skb(skb);
1384}
1385EXPORT_SYMBOL(dev_kfree_skb_any);
1386
1387
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001388/**
1389 * netif_device_detach - mark device as removed
1390 * @dev: network device
1391 *
1392 * Mark device as removed from system and therefore no longer available.
1393 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001394void netif_device_detach(struct net_device *dev)
1395{
1396 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1397 netif_running(dev)) {
1398 netif_stop_queue(dev);
1399 }
1400}
1401EXPORT_SYMBOL(netif_device_detach);
1402
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001403/**
1404 * netif_device_attach - mark device as attached
1405 * @dev: network device
1406 *
1407 * Mark device as attached from system and restart if needed.
1408 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001409void netif_device_attach(struct net_device *dev)
1410{
1411 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1412 netif_running(dev)) {
1413 netif_wake_queue(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001414 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001415 }
1416}
1417EXPORT_SYMBOL(netif_device_attach);
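/*
 * Illustrative sketch (not part of this file): how a hypothetical driver's
 * power-management hooks would typically pair netif_device_detach() and
 * netif_device_attach().  The hardware stop/start steps are only hinted at
 * in comments; the detach/attach calls are the only real kernel API used.
 */
static int foo_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* mark absent, stop the TX queue */
	/* ... put the hardware to sleep here ... */
	return 0;
}

static int foo_resume(struct net_device *dev)
{
	/* ... reinitialise the hardware here ... */
	netif_device_attach(dev);	/* mark present, wake queue + watchdog */
	return 0;
}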
1418
Ben Hutchings6de329e2008-06-16 17:02:28 -07001419static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1420{
1421 return ((features & NETIF_F_GEN_CSUM) ||
1422 ((features & NETIF_F_IP_CSUM) &&
1423 protocol == htons(ETH_P_IP)) ||
1424 ((features & NETIF_F_IPV6_CSUM) &&
1425 protocol == htons(ETH_P_IPV6)));
1426}
1427
1428static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1429{
1430 if (can_checksum_protocol(dev->features, skb->protocol))
1431 return true;
1432
1433 if (skb->protocol == htons(ETH_P_8021Q)) {
1434 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1435 if (can_checksum_protocol(dev->features & dev->vlan_features,
1436 veh->h_vlan_encapsulated_proto))
1437 return true;
1438 }
1439
1440 return false;
1441}
Denis Vlasenko56079432006-03-29 15:57:29 -08001442
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443/*
1444 * Invalidate hardware checksum when packet is to be mangled, and
1445 * complete checksum manually on outgoing path.
1446 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001447int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448{
Al Virod3bc23e2006-11-14 21:24:49 -08001449 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001450 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451
Patrick McHardy84fa7932006-08-29 16:44:56 -07001452 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001453 goto out_set_summed;
1454
1455 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001456 /* Let GSO fix up the checksum. */
1457 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 }
1459
Herbert Xua0308472007-10-15 01:47:15 -07001460 offset = skb->csum_start - skb_headroom(skb);
1461 BUG_ON(offset >= skb_headlen(skb));
1462 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1463
1464 offset += skb->csum_offset;
1465 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1466
1467 if (skb_cloned(skb) &&
1468 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1470 if (ret)
1471 goto out;
1472 }
1473
Herbert Xua0308472007-10-15 01:47:15 -07001474 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001475out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001477out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 return ret;
1479}
1480
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001481/**
1482 * skb_gso_segment - Perform segmentation on skb.
1483 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001484 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001485 *
1486 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001487 *
1488 * It may return NULL if the skb requires no segmentation. This is
1489 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001490 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001491struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001492{
1493 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1494 struct packet_type *ptype;
Al Viro252e33462006-11-14 20:48:11 -08001495 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001496 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001497
1498 BUG_ON(skb_shinfo(skb)->frag_list);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001499
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001500 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001501 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001502 __skb_pull(skb, skb->mac_len);
1503
Herbert Xuf9d106a2007-04-23 22:36:13 -07001504 if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001505 if (skb_header_cloned(skb) &&
1506 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1507 return ERR_PTR(err);
1508 }
1509
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001510 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001511 list_for_each_entry_rcu(ptype,
1512 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001513 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001514 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001515 err = ptype->gso_send_check(skb);
1516 segs = ERR_PTR(err);
1517 if (err || skb_gso_ok(skb, features))
1518 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001519 __skb_push(skb, (skb->data -
1520 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001521 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001522 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001523 break;
1524 }
1525 }
1526 rcu_read_unlock();
1527
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001528 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001529
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001530 return segs;
1531}
1532
1533EXPORT_SYMBOL(skb_gso_segment);
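/*
 * Illustrative sketch (not part of this file): the usual way a caller
 * consumes skb_gso_segment().  Errors come back as ERR_PTR values, NULL
 * means the headers were only verified and the original skb can be sent
 * as-is, and otherwise the segments are chained through ->next.  The
 * xmit_one callback stands in for whatever actually transmits one frame.
 */
static int foo_xmit_gso(struct sk_buff *skb, struct net_device *dev,
			int (*xmit_one)(struct sk_buff *, struct net_device *))
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, dev->features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)
		return xmit_one(skb, dev);	/* no segmentation was needed */

	kfree_skb(skb);				/* original is no longer needed */
	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		xmit_one(nskb, dev);
	}
	return 0;
}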
1534
Herbert Xufb286bb2005-11-10 13:01:24 -08001535/* Take action when hardware reception checksum errors are detected. */
1536#ifdef CONFIG_BUG
1537void netdev_rx_csum_fault(struct net_device *dev)
1538{
1539 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001540 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001541 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001542 dump_stack();
1543 }
1544}
1545EXPORT_SYMBOL(netdev_rx_csum_fault);
1546#endif
1547
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548/* Actually, we should eliminate this check as soon as we know that:
 1549 * 1. An IOMMU is present and allows all of memory to be mapped.
 1550 * 2. No high memory really exists on this machine.
1551 */
1552
1553static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1554{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001555#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 int i;
1557
1558 if (dev->features & NETIF_F_HIGHDMA)
1559 return 0;
1560
1561 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1562 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1563 return 1;
1564
Herbert Xu3d3a8532006-06-27 13:33:10 -07001565#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 return 0;
1567}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001569struct dev_gso_cb {
1570 void (*destructor)(struct sk_buff *skb);
1571};
1572
1573#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1574
1575static void dev_gso_skb_destructor(struct sk_buff *skb)
1576{
1577 struct dev_gso_cb *cb;
1578
1579 do {
1580 struct sk_buff *nskb = skb->next;
1581
1582 skb->next = nskb->next;
1583 nskb->next = NULL;
1584 kfree_skb(nskb);
1585 } while (skb->next);
1586
1587 cb = DEV_GSO_CB(skb);
1588 if (cb->destructor)
1589 cb->destructor(skb);
1590}
1591
1592/**
1593 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1594 * @skb: buffer to segment
1595 *
1596 * This function segments the given skb and stores the list of segments
1597 * in skb->next.
1598 */
1599static int dev_gso_segment(struct sk_buff *skb)
1600{
1601 struct net_device *dev = skb->dev;
1602 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001603 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1604 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001605
Herbert Xu576a30e2006-06-27 13:22:38 -07001606 segs = skb_gso_segment(skb, features);
1607
1608 /* Verifying header integrity only. */
1609 if (!segs)
1610 return 0;
1611
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001612 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001613 return PTR_ERR(segs);
1614
1615 skb->next = segs;
1616 DEV_GSO_CB(skb)->destructor = skb->destructor;
1617 skb->destructor = dev_gso_skb_destructor;
1618
1619 return 0;
1620}
1621
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001622int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1623 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001624{
1625 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001626 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001627 dev_queue_xmit_nit(skb, dev);
1628
Herbert Xu576a30e2006-06-27 13:22:38 -07001629 if (netif_needs_gso(dev, skb)) {
1630 if (unlikely(dev_gso_segment(skb)))
1631 goto out_kfree_skb;
1632 if (skb->next)
1633 goto gso;
1634 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001635
Herbert Xu576a30e2006-06-27 13:22:38 -07001636 return dev->hard_start_xmit(skb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001637 }
1638
Herbert Xu576a30e2006-06-27 13:22:38 -07001639gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001640 do {
1641 struct sk_buff *nskb = skb->next;
1642 int rc;
1643
1644 skb->next = nskb->next;
1645 nskb->next = NULL;
1646 rc = dev->hard_start_xmit(nskb, dev);
1647 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001648 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001649 skb->next = nskb;
1650 return rc;
1651 }
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001652 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001653 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001654 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001655
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001656 skb->destructor = DEV_GSO_CB(skb)->destructor;
1657
1658out_kfree_skb:
1659 kfree_skb(skb);
1660 return 0;
1661}
1662
David S. Millerb6b2fed2008-07-21 09:48:06 -07001663static u32 simple_tx_hashrnd;
1664static int simple_tx_hashrnd_initialized = 0;
1665
David S. Miller8f0f2222008-07-15 03:47:03 -07001666static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
1667{
David S. Millerb6b2fed2008-07-21 09:48:06 -07001668 u32 addr1, addr2, ports;
1669 u32 hash, ihl;
David S. Miller8f0f2222008-07-15 03:47:03 -07001670 u8 ip_proto;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001671
1672 if (unlikely(!simple_tx_hashrnd_initialized)) {
1673 get_random_bytes(&simple_tx_hashrnd, 4);
1674 simple_tx_hashrnd_initialized = 1;
1675 }
David S. Miller8f0f2222008-07-15 03:47:03 -07001676
1677 switch (skb->protocol) {
1678 case __constant_htons(ETH_P_IP):
1679 ip_proto = ip_hdr(skb)->protocol;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001680 addr1 = ip_hdr(skb)->saddr;
1681 addr2 = ip_hdr(skb)->daddr;
David S. Miller8f0f2222008-07-15 03:47:03 -07001682 ihl = ip_hdr(skb)->ihl;
David S. Miller8f0f2222008-07-15 03:47:03 -07001683 break;
1684 case __constant_htons(ETH_P_IPV6):
1685 ip_proto = ipv6_hdr(skb)->nexthdr;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001686 addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
1687 addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
David S. Miller8f0f2222008-07-15 03:47:03 -07001688 ihl = (40 >> 2);
David S. Miller8f0f2222008-07-15 03:47:03 -07001689 break;
1690 default:
1691 return 0;
1692 }
1693
David S. Miller8f0f2222008-07-15 03:47:03 -07001694
1695 switch (ip_proto) {
1696 case IPPROTO_TCP:
1697 case IPPROTO_UDP:
1698 case IPPROTO_DCCP:
1699 case IPPROTO_ESP:
1700 case IPPROTO_AH:
1701 case IPPROTO_SCTP:
1702 case IPPROTO_UDPLITE:
David S. Millerb6b2fed2008-07-21 09:48:06 -07001703 ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
David S. Miller8f0f2222008-07-15 03:47:03 -07001704 break;
1705
1706 default:
David S. Millerb6b2fed2008-07-21 09:48:06 -07001707 ports = 0;
David S. Miller8f0f2222008-07-15 03:47:03 -07001708 break;
1709 }
1710
David S. Millerb6b2fed2008-07-21 09:48:06 -07001711 hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
1712
1713 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001714}
1715
David S. Millere8a04642008-07-17 00:34:19 -07001716static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1717 struct sk_buff *skb)
1718{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001719 u16 queue_index = 0;
1720
David S. Millereae792b2008-07-15 03:03:33 -07001721 if (dev->select_queue)
1722 queue_index = dev->select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001723 else if (dev->real_num_tx_queues > 1)
1724 queue_index = simple_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001725
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001726 skb_set_queue_mapping(skb, queue_index);
1727 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001728}
1729
Dave Jonesd29f7492008-07-22 14:09:06 -07001730/**
1731 * dev_queue_xmit - transmit a buffer
1732 * @skb: buffer to transmit
1733 *
1734 * Queue a buffer for transmission to a network device. The caller must
1735 * have set the device and priority and built the buffer before calling
1736 * this function. The function can be called from an interrupt.
1737 *
1738 * A negative errno code is returned on a failure. A success does not
1739 * guarantee the frame will be transmitted as it may be dropped due
1740 * to congestion or traffic shaping.
1741 *
1742 * -----------------------------------------------------------------------------------
1743 * I notice this method can also return errors from the queue disciplines,
1744 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1745 * be positive.
1746 *
1747 * Regardless of the return value, the skb is consumed, so it is currently
1748 * difficult to retry a send to this method. (You can bump the ref count
1749 * before sending to hold a reference for retry if you are careful.)
1750 *
1751 * When calling this method, interrupts MUST be enabled. This is because
1752 * the BH enable code must have IRQs enabled so that it will not deadlock.
1753 * --BLG
1754 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755int dev_queue_xmit(struct sk_buff *skb)
1756{
1757 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001758 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 struct Qdisc *q;
1760 int rc = -ENOMEM;
1761
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001762 /* GSO will handle the following emulations directly. */
1763 if (netif_needs_gso(dev, skb))
1764 goto gso;
1765
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 if (skb_shinfo(skb)->frag_list &&
1767 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001768 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 goto out_kfree_skb;
1770
 1771 /* Fragmented skb is linearized if the device does not support SG,
 1772 * or if at least one of the fragments is in highmem and the device
 1773 * does not support DMA from it.
1774 */
1775 if (skb_shinfo(skb)->nr_frags &&
1776 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001777 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 goto out_kfree_skb;
1779
 1780 /* If the packet is not checksummed and the device does not support
 1781 * checksumming for this protocol, complete the checksum here.
1782 */
Herbert Xu663ead32007-04-09 11:59:07 -07001783 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1784 skb_set_transport_header(skb, skb->csum_start -
1785 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001786 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1787 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001788 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001790gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001791 /* Disable soft irqs for various locks below. Also
1792 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001794 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795
David S. Millereae792b2008-07-15 03:03:33 -07001796 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001797 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001798
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799#ifdef CONFIG_NET_CLS_ACT
1800 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1801#endif
1802 if (q->enqueue) {
David S. Miller5fb66222008-08-02 20:02:43 -07001803 spinlock_t *root_lock = qdisc_lock(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804
David S. Miller37437bb2008-07-16 02:15:04 -07001805 spin_lock(root_lock);
1806
David S. Millera9312ae2008-08-17 21:51:03 -07001807 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
David S. Miller96d20312008-08-17 23:37:16 -07001808 kfree_skb(skb);
David S. Millera9312ae2008-08-17 21:51:03 -07001809 rc = NET_XMIT_DROP;
David S. Miller96d20312008-08-17 23:37:16 -07001810 } else {
1811 rc = qdisc_enqueue_root(skb, q);
1812 qdisc_run(q);
David S. Millera9312ae2008-08-17 21:51:03 -07001813 }
David S. Miller37437bb2008-07-16 02:15:04 -07001814 spin_unlock(root_lock);
1815
David S. Miller37437bb2008-07-16 02:15:04 -07001816 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 }
1818
 1819 /* The device has no queue. Common case for software devices:
 1820    loopback, all sorts of tunnels...
 1821
Herbert Xu932ff272006-06-09 12:20:56 -07001822    Really, it is unlikely that netif_tx_lock protection is necessary
 1823    here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824    counters.)
 1825    However, it is possible that they rely on the protection
 1826    provided by us here.
 1827
 1828    Check this and shoot the lock. It is not prone to deadlocks.
 1829    Either shoot the noqueue qdisc, it is even simpler 8)
 1830 */
1831 if (dev->flags & IFF_UP) {
1832 int cpu = smp_processor_id(); /* ok because BHs are off */
1833
David S. Millerc773e842008-07-08 23:13:53 -07001834 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835
David S. Millerc773e842008-07-08 23:13:53 -07001836 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001838 if (!netif_tx_queue_stopped(txq)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 rc = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001840 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001841 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 goto out;
1843 }
1844 }
David S. Millerc773e842008-07-08 23:13:53 -07001845 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 if (net_ratelimit())
1847 printk(KERN_CRIT "Virtual device %s asks to "
1848 "queue packet!\n", dev->name);
1849 } else {
1850 /* Recursion is detected! It is possible,
1851 * unfortunately */
1852 if (net_ratelimit())
1853 printk(KERN_CRIT "Dead loop on virtual device "
1854 "%s, fix it urgently!\n", dev->name);
1855 }
1856 }
1857
1858 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001859 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860
1861out_kfree_skb:
1862 kfree_skb(skb);
1863 return rc;
1864out:
Herbert Xud4828d82006-06-22 02:28:18 -07001865 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 return rc;
1867}
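/*
 * Illustrative sketch (not part of this file): building and sending a raw
 * Ethernet frame through dev_queue_xmit().  The caller fills in skb->dev
 * (and optionally skb->priority) before the call and must not touch the
 * skb afterwards, since it is consumed whether or not transmission
 * succeeds.  FOO_ETH_PROTO and the broadcast destination are assumptions
 * made purely for the example.
 */
#define FOO_ETH_PROTO	0x88b5	/* assumed: a local/experimental EtherType */

static int foo_send_frame(struct net_device *dev, const void *payload,
			  unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	memcpy(skb_put(skb, len), payload, len);

	skb->dev = dev;
	skb->protocol = htons(FOO_ETH_PROTO);

	if (dev_hard_header(skb, dev, FOO_ETH_PROTO, dev->broadcast,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return dev_queue_xmit(skb);	/* skb belongs to the stack from here on */
}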
1868
1869
1870/*=======================================================================
1871 Receiver routines
1872 =======================================================================*/
1873
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001874int netdev_max_backlog __read_mostly = 1000;
1875int netdev_budget __read_mostly = 300;
1876int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877
1878DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1879
1880
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881/**
1882 * netif_rx - post buffer to the network code
1883 * @skb: buffer to post
1884 *
1885 * This function receives a packet from a device driver and queues it for
1886 * the upper (protocol) levels to process. It always succeeds. The buffer
1887 * may be dropped during processing for congestion control or by the
1888 * protocol layers.
1889 *
1890 * return values:
1891 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 * NET_RX_DROP (packet was dropped)
1893 *
1894 */
1895
1896int netif_rx(struct sk_buff *skb)
1897{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 struct softnet_data *queue;
1899 unsigned long flags;
1900
1901 /* if netpoll wants it, pretend we never saw it */
1902 if (netpoll_rx(skb))
1903 return NET_RX_DROP;
1904
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001905 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001906 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907
1908 /*
 1909 * The code is rearranged so that the path is shortest when the
 1910 * CPU is congested but still operating.
1911 */
1912 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 queue = &__get_cpu_var(softnet_data);
1914
1915 __get_cpu_var(netdev_rx_stat).total++;
1916 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1917 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001921 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 }
1923
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001924 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 goto enqueue;
1926 }
1927
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 __get_cpu_var(netdev_rx_stat).dropped++;
1929 local_irq_restore(flags);
1930
1931 kfree_skb(skb);
1932 return NET_RX_DROP;
1933}
1934
1935int netif_rx_ni(struct sk_buff *skb)
1936{
1937 int err;
1938
1939 preempt_disable();
1940 err = netif_rx(skb);
1941 if (local_softirq_pending())
1942 do_softirq();
1943 preempt_enable();
1944
1945 return err;
1946}
1947
1948EXPORT_SYMBOL(netif_rx_ni);
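/*
 * Illustrative sketch (not part of this file): a hypothetical non-NAPI
 * receive path.  netif_rx() is intended to be called from the device's
 * interrupt handler, while code delivering packets from process context
 * (a workqueue, for instance) should use netif_rx_ni() so that any
 * pending NET_RX softirq gets a chance to run immediately.
 */
static void foo_deliver_rx(struct net_device *dev, struct sk_buff *skb,
			   bool from_irq)
{
	skb->protocol = eth_type_trans(skb, dev);

	if (from_irq)
		netif_rx(skb);		/* interrupt context */
	else
		netif_rx_ni(skb);	/* process context */
}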
1949
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950static void net_tx_action(struct softirq_action *h)
1951{
1952 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1953
1954 if (sd->completion_queue) {
1955 struct sk_buff *clist;
1956
1957 local_irq_disable();
1958 clist = sd->completion_queue;
1959 sd->completion_queue = NULL;
1960 local_irq_enable();
1961
1962 while (clist) {
1963 struct sk_buff *skb = clist;
1964 clist = clist->next;
1965
Ilpo Järvinen547b7922008-07-25 21:43:18 -07001966 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 __kfree_skb(skb);
1968 }
1969 }
1970
1971 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07001972 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
1974 local_irq_disable();
1975 head = sd->output_queue;
1976 sd->output_queue = NULL;
1977 local_irq_enable();
1978
1979 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07001980 struct Qdisc *q = head;
1981 spinlock_t *root_lock;
1982
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 head = head->next_sched;
1984
David S. Miller5fb66222008-08-02 20:02:43 -07001985 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07001986 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001987 smp_mb__before_clear_bit();
1988 clear_bit(__QDISC_STATE_SCHED,
1989 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07001990 qdisc_run(q);
1991 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 } else {
David S. Miller195648b2008-08-19 04:00:36 -07001993 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07001994 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07001995 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07001996 } else {
1997 smp_mb__before_clear_bit();
1998 clear_bit(__QDISC_STATE_SCHED,
1999 &q->state);
2000 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 }
2002 }
2003 }
2004}
2005
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002006static inline int deliver_skb(struct sk_buff *skb,
2007 struct packet_type *pt_prev,
2008 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009{
2010 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002011 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012}
2013
2014#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Stephen Hemminger6229e362007-03-21 13:38:47 -07002015/* These hooks defined here for ATM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016struct net_bridge;
2017struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
2018 unsigned char *addr);
Stephen Hemminger6229e362007-03-21 13:38:47 -07002019void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020
Stephen Hemminger6229e362007-03-21 13:38:47 -07002021/*
 2022 * If the bridge module is loaded, call the bridging hook.
 2023 * Returns NULL if the packet was consumed.
2024 */
2025struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2026 struct sk_buff *skb) __read_mostly;
2027static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2028 struct packet_type **pt_prev, int *ret,
2029 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030{
2031 struct net_bridge_port *port;
2032
Stephen Hemminger6229e362007-03-21 13:38:47 -07002033 if (skb->pkt_type == PACKET_LOOPBACK ||
2034 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2035 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036
2037 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002038 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002040 }
2041
Stephen Hemminger6229e362007-03-21 13:38:47 -07002042 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043}
2044#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002045#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046#endif
2047
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002048#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2049struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2050EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2051
2052static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2053 struct packet_type **pt_prev,
2054 int *ret,
2055 struct net_device *orig_dev)
2056{
2057 if (skb->dev->macvlan_port == NULL)
2058 return skb;
2059
2060 if (*pt_prev) {
2061 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2062 *pt_prev = NULL;
2063 }
2064 return macvlan_handle_frame_hook(skb);
2065}
2066#else
2067#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2068#endif
2069
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070#ifdef CONFIG_NET_CLS_ACT
 2071/* TODO: Maybe we should just force sch_ingress to be compiled in
 2072 * whenever CONFIG_NET_CLS_ACT is? Otherwise we pay for a few useless
 2073 * instructions (a compare and two extra stores) when ingress is not
 2074 * in use but CONFIG_NET_CLS_ACT is set.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002075 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 * the ingress scheduler, you just can't add policies on ingress.
2077 *
2078 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002079static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002082 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002083 struct netdev_queue *rxq;
2084 int result = TC_ACT_OK;
2085 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002086
Herbert Xuf697c3e2007-10-14 00:38:47 -07002087 if (MAX_RED_LOOP < ttl++) {
2088 printk(KERN_WARNING
2089 "Redir loop detected Dropping packet (%d->%d)\n",
2090 skb->iif, dev->ifindex);
2091 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 }
2093
Herbert Xuf697c3e2007-10-14 00:38:47 -07002094 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2095 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2096
David S. Miller555353c2008-07-08 17:33:13 -07002097 rxq = &dev->rx_queue;
2098
David S. Miller83874002008-07-17 00:53:03 -07002099 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002100 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002101 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002102 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2103 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002104 spin_unlock(qdisc_lock(q));
2105 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 return result;
2108}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002109
2110static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2111 struct packet_type **pt_prev,
2112 int *ret, struct net_device *orig_dev)
2113{
David S. Miller8d50b532008-07-30 02:37:46 -07002114 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002115 goto out;
2116
2117 if (*pt_prev) {
2118 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2119 *pt_prev = NULL;
2120 } else {
2121 /* Huh? Why does turning on AF_PACKET affect this? */
2122 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2123 }
2124
2125 switch (ing_filter(skb)) {
2126 case TC_ACT_SHOT:
2127 case TC_ACT_STOLEN:
2128 kfree_skb(skb);
2129 return NULL;
2130 }
2131
2132out:
2133 skb->tc_verd = 0;
2134 return skb;
2135}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136#endif
2137
Patrick McHardybc1d0412008-07-14 22:49:30 -07002138/*
2139 * netif_nit_deliver - deliver received packets to network taps
2140 * @skb: buffer
2141 *
2142 * This function is used to deliver incoming packets to network
2143 * taps. It should be used when the normal netif_receive_skb path
2144 * is bypassed, for example because of VLAN acceleration.
2145 */
2146void netif_nit_deliver(struct sk_buff *skb)
2147{
2148 struct packet_type *ptype;
2149
2150 if (list_empty(&ptype_all))
2151 return;
2152
2153 skb_reset_network_header(skb);
2154 skb_reset_transport_header(skb);
2155 skb->mac_len = skb->network_header - skb->mac_header;
2156
2157 rcu_read_lock();
2158 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2159 if (!ptype->dev || ptype->dev == skb->dev)
2160 deliver_skb(skb, ptype, skb->dev);
2161 }
2162 rcu_read_unlock();
2163}
2164
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002165/**
2166 * netif_receive_skb - process receive buffer from network
2167 * @skb: buffer to process
2168 *
2169 * netif_receive_skb() is the main receive data processing function.
2170 * It always succeeds. The buffer may be dropped during processing
2171 * for congestion control or by the protocol layers.
2172 *
2173 * This function may only be called from softirq context and interrupts
2174 * should be enabled.
2175 *
2176 * Return values (usually ignored):
2177 * NET_RX_SUCCESS: no congestion
2178 * NET_RX_DROP: packet was dropped
2179 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180int netif_receive_skb(struct sk_buff *skb)
2181{
2182 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002183 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002184 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 int ret = NET_RX_DROP;
Al Viro252e33462006-11-14 20:48:11 -08002186 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
2188 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002189 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 return NET_RX_DROP;
2191
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002192 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002193 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194
Patrick McHardyc01003c2007-03-29 11:46:52 -07002195 if (!skb->iif)
2196 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002197
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002198 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002199 orig_dev = skb->dev;
2200 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002201 if (skb_bond_should_drop(skb))
2202 null_or_orig = orig_dev; /* deliver only exact match */
2203 else
2204 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002205 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002206
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 __get_cpu_var(netdev_rx_stat).total++;
2208
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002209 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002210 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002211 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
2213 pt_prev = NULL;
2214
2215 rcu_read_lock();
2216
Eric W. Biedermanb9f75f42008-06-20 22:16:51 -07002217 /* Don't receive packets in an exiting network namespace */
2218 if (!net_alive(dev_net(skb->dev)))
2219 goto out;
2220
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221#ifdef CONFIG_NET_CLS_ACT
2222 if (skb->tc_verd & TC_NCLS) {
2223 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2224 goto ncls;
2225 }
2226#endif
2227
2228 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002229 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2230 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002231 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002232 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 pt_prev = ptype;
2234 }
2235 }
2236
2237#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002238 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2239 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241ncls:
2242#endif
2243
Stephen Hemminger6229e362007-03-21 13:38:47 -07002244 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2245 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002247 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2248 if (!skb)
2249 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250
2251 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002252 list_for_each_entry_rcu(ptype,
2253 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002255 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2256 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002257 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002258 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 pt_prev = ptype;
2260 }
2261 }
2262
2263 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002264 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 } else {
2266 kfree_skb(skb);
 2267 /* Jamal, now you will not be able to escape explaining
 2268 * to me how you were going to use this. :-)
2269 */
2270 ret = NET_RX_DROP;
2271 }
2272
2273out:
2274 rcu_read_unlock();
2275 return ret;
2276}
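/*
 * Illustrative sketch (not part of this file): the skeleton of a
 * hypothetical NAPI poll routine feeding frames to netif_receive_skb()
 * from softirq context.  foo_rx_next() and foo_enable_rx_irq() stand in
 * for the driver's own ring handling and interrupt unmasking.
 */
struct foo_priv {
	struct napi_struct	napi;
	struct net_device	*dev;
	/* ... ring state would live here in a real driver ... */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = 0;
	struct sk_buff *skb;

	while (work_done < budget && (skb = foo_rx_next(priv)) != NULL) {
		skb->protocol = eth_type_trans(skb, priv->dev);
		netif_receive_skb(skb);
		work_done++;
	}

	if (work_done < budget) {
		netif_rx_complete(priv->dev, napi);	/* done for now */
		foo_enable_rx_irq(priv);		/* let the IRQ rearm NAPI */
	}

	return work_done;
}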
2277
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002278/* Network device is going away, flush any packets still pending */
2279static void flush_backlog(void *arg)
2280{
2281 struct net_device *dev = arg;
2282 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2283 struct sk_buff *skb, *tmp;
2284
2285 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2286 if (skb->dev == dev) {
2287 __skb_unlink(skb, &queue->input_pkt_queue);
2288 kfree_skb(skb);
2289 }
2290}
2291
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002292static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293{
2294 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2296 unsigned long start_time = jiffies;
2297
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002298 napi->weight = weight_p;
2299 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301
2302 local_irq_disable();
2303 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002304 if (!skb) {
2305 __napi_complete(napi);
2306 local_irq_enable();
2307 break;
2308 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309 local_irq_enable();
2310
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002312 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002314 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315}
2316
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002317/**
2318 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002319 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002320 *
2321 * The entry's receive function will be scheduled to run
2322 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002323void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002324{
2325 unsigned long flags;
2326
2327 local_irq_save(flags);
2328 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2329 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2330 local_irq_restore(flags);
2331}
2332EXPORT_SYMBOL(__napi_schedule);
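/*
 * Illustrative sketch (not part of this file): the interrupt-handler half
 * of NAPI for the hypothetical driver sketched above (assuming foo_priv is
 * its netdev_priv() area).  netif_rx_schedule() only ends up calling
 * __napi_schedule() when NAPI_STATE_SCHED was not already set, so spurious
 * or repeated interrupts are harmless.  The foo_* helpers are assumed,
 * driver-private pieces.
 */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct foo_priv *priv = netdev_priv(dev);

	if (!foo_irq_pending(priv))		/* not our interrupt */
		return IRQ_NONE;

	foo_disable_rx_irq(priv);		/* quiesce the device ... */
	netif_rx_schedule(dev, &priv->napi);	/* ... and poll later */

	return IRQ_HANDLED;
}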
2333
2334
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335static void net_rx_action(struct softirq_action *h)
2336{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002337 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 unsigned long start_time = jiffies;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002339 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002340 void *have;
2341
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 local_irq_disable();
2343
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002344 while (!list_empty(list)) {
2345 struct napi_struct *n;
2346 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002348 /* If the softirq window is exhausted then punt.
2349 *
2350 * Note that this is a slight policy change from the
2351 * previous NAPI code, which would allow up to 2
2352 * jiffies to pass before breaking out. The test
2353 * used to be "jiffies - start_time > 1".
2354 */
2355 if (unlikely(budget <= 0 || jiffies != start_time))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 goto softnet_break;
2357
2358 local_irq_enable();
2359
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002360 /* Even though interrupts have been re-enabled, this
2361 * access is safe because interrupts can only add new
2362 * entries to the tail of this list, and only ->poll()
2363 * calls can remove this head entry from the list.
2364 */
2365 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002367 have = netpoll_poll_lock(n);
2368
2369 weight = n->weight;
2370
David S. Miller0a7606c2007-10-29 21:28:47 -07002371 /* This NAPI_STATE_SCHED test is for avoiding a race
2372 * with netpoll's poll_napi(). Only the entity which
2373 * obtains the lock and sees NAPI_STATE_SCHED set will
2374 * actually make the ->poll() call. Therefore we avoid
 2375 * accidentally calling ->poll() when NAPI is not scheduled.
2376 */
2377 work = 0;
2378 if (test_bit(NAPI_STATE_SCHED, &n->state))
2379 work = n->poll(n, weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002380
2381 WARN_ON_ONCE(work > weight);
2382
2383 budget -= work;
2384
2385 local_irq_disable();
2386
2387 /* Drivers must not modify the NAPI state if they
2388 * consume the entire weight. In such cases this code
2389 * still "owns" the NAPI instance and therefore can
2390 * move the instance around on the list at-will.
2391 */
David S. Millerfed17f32008-01-07 21:00:40 -08002392 if (unlikely(work == weight)) {
2393 if (unlikely(napi_disable_pending(n)))
2394 __napi_complete(n);
2395 else
2396 list_move_tail(&n->poll_list, list);
2397 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002398
2399 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 }
2401out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002402 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002403
Chris Leechdb217332006-06-17 21:24:58 -07002404#ifdef CONFIG_NET_DMA
2405 /*
2406 * There may not be any more sk_buffs coming right now, so push
2407 * any pending DMA copies to hardware
2408 */
Dan Williamsd379b012007-07-09 11:56:42 -07002409 if (!cpus_empty(net_dma.channel_mask)) {
2410 int chan_idx;
Mike Travis0e12f842008-05-12 21:21:13 +02002411 for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
Dan Williamsd379b012007-07-09 11:56:42 -07002412 struct dma_chan *chan = net_dma.channels[chan_idx];
2413 if (chan)
2414 dma_async_memcpy_issue_pending(chan);
2415 }
Chris Leechdb217332006-06-17 21:24:58 -07002416 }
2417#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002418
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 return;
2420
2421softnet_break:
2422 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2423 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2424 goto out;
2425}
2426
2427static gifconf_func_t * gifconf_list [NPROTO];
2428
2429/**
2430 * register_gifconf - register a SIOCGIF handler
2431 * @family: Address family
2432 * @gifconf: Function handler
2433 *
2434 * Register protocol dependent address dumping routines. The handler
2435 * that is passed must not be freed or reused until it has been replaced
2436 * by another handler.
2437 */
2438int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2439{
2440 if (family >= NPROTO)
2441 return -EINVAL;
2442 gifconf_list[family] = gifconf;
2443 return 0;
2444}
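/*
 * Illustrative sketch (not part of this file): registering a SIOCGIFCONF
 * handler for a hypothetical address family.  Per the convention used by
 * dev_ifconf() below, the handler returns the number of bytes it wrote to
 * the user buffer, or, when called with a NULL buffer, the space it would
 * need; this stub simply reports no addresses.
 */
static int foo_gifconf(struct net_device *dev, char __user *bufptr, int len)
{
	/* a real handler writes one struct ifreq per address on @dev */
	return 0;
}

static int foo_register_gifconf(unsigned int family)
{
	return register_gifconf(family, foo_gifconf);
}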
2445
2446
2447/*
2448 * Map an interface index to its name (SIOCGIFNAME)
2449 */
2450
2451/*
2452 * We need this ioctl for efficient implementation of the
2453 * if_indextoname() function required by the IPv6 API. Without
2454 * it, we would have to search all the interfaces to find a
2455 * match. --pb
2456 */
2457
Eric W. Biederman881d9662007-09-17 11:56:21 -07002458static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459{
2460 struct net_device *dev;
2461 struct ifreq ifr;
2462
2463 /*
2464 * Fetch the caller's info block.
2465 */
2466
2467 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2468 return -EFAULT;
2469
2470 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002471 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 if (!dev) {
2473 read_unlock(&dev_base_lock);
2474 return -ENODEV;
2475 }
2476
2477 strcpy(ifr.ifr_name, dev->name);
2478 read_unlock(&dev_base_lock);
2479
2480 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2481 return -EFAULT;
2482 return 0;
2483}
2484
2485/*
2486 * Perform a SIOCGIFCONF call. This structure will change
2487 * size eventually, and there is nothing I can do about it.
2488 * Thus we will need a 'compatibility mode'.
2489 */
2490
Eric W. Biederman881d9662007-09-17 11:56:21 -07002491static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492{
2493 struct ifconf ifc;
2494 struct net_device *dev;
2495 char __user *pos;
2496 int len;
2497 int total;
2498 int i;
2499
2500 /*
2501 * Fetch the caller's info block.
2502 */
2503
2504 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2505 return -EFAULT;
2506
2507 pos = ifc.ifc_buf;
2508 len = ifc.ifc_len;
2509
2510 /*
2511 * Loop over the interfaces, and write an info block for each.
2512 */
2513
2514 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002515 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 for (i = 0; i < NPROTO; i++) {
2517 if (gifconf_list[i]) {
2518 int done;
2519 if (!pos)
2520 done = gifconf_list[i](dev, NULL, 0);
2521 else
2522 done = gifconf_list[i](dev, pos + total,
2523 len - total);
2524 if (done < 0)
2525 return -EFAULT;
2526 total += done;
2527 }
2528 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002529 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530
2531 /*
2532 * All done. Write the updated control block back to the caller.
2533 */
2534 ifc.ifc_len = total;
2535
2536 /*
2537 * Both BSD and Solaris return 0 here, so we do too.
2538 */
2539 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2540}
2541
2542#ifdef CONFIG_PROC_FS
2543/*
2544 * This is invoked by the /proc filesystem handler to display a device
2545 * in detail.
2546 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002548 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549{
Denis V. Luneve372c412007-11-19 22:31:54 -08002550 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002551 loff_t off;
2552 struct net_device *dev;
2553
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002555 if (!*pos)
2556 return SEQ_START_TOKEN;
2557
2558 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002559 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002560 if (off++ == *pos)
2561 return dev;
2562
2563 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564}
2565
2566void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2567{
Denis V. Luneve372c412007-11-19 22:31:54 -08002568 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002570 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002571 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572}
2573
2574void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002575 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576{
2577 read_unlock(&dev_base_lock);
2578}
2579
2580static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2581{
Rusty Russellc45d2862007-03-28 14:29:08 -07002582 struct net_device_stats *stats = dev->get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583
Rusty Russell5a1b5892007-04-28 21:04:03 -07002584 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2585 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2586 dev->name, stats->rx_bytes, stats->rx_packets,
2587 stats->rx_errors,
2588 stats->rx_dropped + stats->rx_missed_errors,
2589 stats->rx_fifo_errors,
2590 stats->rx_length_errors + stats->rx_over_errors +
2591 stats->rx_crc_errors + stats->rx_frame_errors,
2592 stats->rx_compressed, stats->multicast,
2593 stats->tx_bytes, stats->tx_packets,
2594 stats->tx_errors, stats->tx_dropped,
2595 stats->tx_fifo_errors, stats->collisions,
2596 stats->tx_carrier_errors +
2597 stats->tx_aborted_errors +
2598 stats->tx_window_errors +
2599 stats->tx_heartbeat_errors,
2600 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601}
2602
2603/*
2604 * Called from the PROCfs module. This now uses the new arbitrary sized
2605 * /proc/net interface to create /proc/net/dev
2606 */
2607static int dev_seq_show(struct seq_file *seq, void *v)
2608{
2609 if (v == SEQ_START_TOKEN)
2610 seq_puts(seq, "Inter-| Receive "
2611 " | Transmit\n"
2612 " face |bytes packets errs drop fifo frame "
2613 "compressed multicast|bytes packets errs "
2614 "drop fifo colls carrier compressed\n");
2615 else
2616 dev_seq_printf_stats(seq, v);
2617 return 0;
2618}
2619
2620static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2621{
2622 struct netif_rx_stats *rc = NULL;
2623
Mike Travis0c0b0ac2008-05-02 16:43:08 -07002624 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002625 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 rc = &per_cpu(netdev_rx_stat, *pos);
2627 break;
2628 } else
2629 ++*pos;
2630 return rc;
2631}
2632
2633static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2634{
2635 return softnet_get_online(pos);
2636}
2637
2638static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2639{
2640 ++*pos;
2641 return softnet_get_online(pos);
2642}
2643
2644static void softnet_seq_stop(struct seq_file *seq, void *v)
2645{
2646}
2647
2648static int softnet_seq_show(struct seq_file *seq, void *v)
2649{
2650 struct netif_rx_stats *s = v;
2651
2652 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07002653 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07002654 0, 0, 0, 0, /* was fastroute */
2655 s->cpu_collision );
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656 return 0;
2657}
2658
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002659static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 .start = dev_seq_start,
2661 .next = dev_seq_next,
2662 .stop = dev_seq_stop,
2663 .show = dev_seq_show,
2664};
2665
2666static int dev_seq_open(struct inode *inode, struct file *file)
2667{
Denis V. Luneve372c412007-11-19 22:31:54 -08002668 return seq_open_net(inode, file, &dev_seq_ops,
2669 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670}
2671
Arjan van de Ven9a321442007-02-12 00:55:35 -08002672static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673 .owner = THIS_MODULE,
2674 .open = dev_seq_open,
2675 .read = seq_read,
2676 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08002677 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678};
2679
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002680static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 .start = softnet_seq_start,
2682 .next = softnet_seq_next,
2683 .stop = softnet_seq_stop,
2684 .show = softnet_seq_show,
2685};
2686
2687static int softnet_seq_open(struct inode *inode, struct file *file)
2688{
2689 return seq_open(file, &softnet_seq_ops);
2690}
2691
Arjan van de Ven9a321442007-02-12 00:55:35 -08002692static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 .owner = THIS_MODULE,
2694 .open = softnet_seq_open,
2695 .read = seq_read,
2696 .llseek = seq_lseek,
2697 .release = seq_release,
2698};
2699
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002700static void *ptype_get_idx(loff_t pos)
2701{
2702 struct packet_type *pt = NULL;
2703 loff_t i = 0;
2704 int t;
2705
2706 list_for_each_entry_rcu(pt, &ptype_all, list) {
2707 if (i == pos)
2708 return pt;
2709 ++i;
2710 }
2711
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002712 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002713 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2714 if (i == pos)
2715 return pt;
2716 ++i;
2717 }
2718 }
2719 return NULL;
2720}
2721
2722static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002723 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002724{
2725 rcu_read_lock();
2726 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2727}
2728
2729static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2730{
2731 struct packet_type *pt;
2732 struct list_head *nxt;
2733 int hash;
2734
2735 ++*pos;
2736 if (v == SEQ_START_TOKEN)
2737 return ptype_get_idx(0);
2738
2739 pt = v;
2740 nxt = pt->list.next;
2741 if (pt->type == htons(ETH_P_ALL)) {
2742 if (nxt != &ptype_all)
2743 goto found;
2744 hash = 0;
2745 nxt = ptype_base[0].next;
2746 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002747 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002748
2749 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002750 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002751 return NULL;
2752 nxt = ptype_base[hash].next;
2753 }
2754found:
2755 return list_entry(nxt, struct packet_type, list);
2756}
2757
2758static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002759 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002760{
2761 rcu_read_unlock();
2762}
2763
2764static void ptype_seq_decode(struct seq_file *seq, void *sym)
2765{
2766#ifdef CONFIG_KALLSYMS
2767 unsigned long offset = 0, symsize;
2768 const char *symname;
2769 char *modname;
2770 char namebuf[128];
2771
2772 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2773 &modname, namebuf);
2774
2775 if (symname) {
2776 char *delim = ":";
2777
2778 if (!modname)
2779 modname = delim = "";
2780 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2781 symname, offset);
2782 return;
2783 }
2784#endif
2785
2786 seq_printf(seq, "[%p]", sym);
2787}
2788
2789static int ptype_seq_show(struct seq_file *seq, void *v)
2790{
2791 struct packet_type *pt = v;
2792
2793 if (v == SEQ_START_TOKEN)
2794 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002795 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002796 if (pt->type == htons(ETH_P_ALL))
2797 seq_puts(seq, "ALL ");
2798 else
2799 seq_printf(seq, "%04x", ntohs(pt->type));
2800
2801 seq_printf(seq, " %-8s ",
2802 pt->dev ? pt->dev->name : "");
2803 ptype_seq_decode(seq, pt->func);
2804 seq_putc(seq, '\n');
2805 }
2806
2807 return 0;
2808}
2809
2810static const struct seq_operations ptype_seq_ops = {
2811 .start = ptype_seq_start,
2812 .next = ptype_seq_next,
2813 .stop = ptype_seq_stop,
2814 .show = ptype_seq_show,
2815};
2816
2817static int ptype_seq_open(struct inode *inode, struct file *file)
2818{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002819 return seq_open_net(inode, file, &ptype_seq_ops,
2820 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002821}
2822
2823static const struct file_operations ptype_seq_fops = {
2824 .owner = THIS_MODULE,
2825 .open = ptype_seq_open,
2826 .read = seq_read,
2827 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002828 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002829};
2830
2831
Pavel Emelyanov46650792007-10-08 20:38:39 -07002832static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833{
2834 int rc = -ENOMEM;
2835
Eric W. Biederman881d9662007-09-17 11:56:21 -07002836 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002838 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002840 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002841 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002842
Eric W. Biederman881d9662007-09-17 11:56:21 -07002843 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002844 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845 rc = 0;
2846out:
2847 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002848out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002849 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002850out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002851 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002853 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 goto out;
2855}
Eric W. Biederman881d9662007-09-17 11:56:21 -07002856
Pavel Emelyanov46650792007-10-08 20:38:39 -07002857static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07002858{
2859 wext_proc_exit(net);
2860
2861 proc_net_remove(net, "ptype");
2862 proc_net_remove(net, "softnet_stat");
2863 proc_net_remove(net, "dev");
2864}
2865
Denis V. Lunev022cbae2007-11-13 03:23:50 -08002866static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07002867 .init = dev_proc_net_init,
2868 .exit = dev_proc_net_exit,
2869};
2870
2871static int __init dev_proc_init(void)
2872{
2873 return register_pernet_subsys(&dev_proc_ops);
2874}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875#else
2876#define dev_proc_init() 0
2877#endif /* CONFIG_PROC_FS */
2878
2879
2880/**
2881 * netdev_set_master - set up master/slave pair
2882 * @slave: slave device
2883 * @master: new master device
2884 *
2885 * Changes the master device of the slave. Pass %NULL to break the
2886 * bonding. The caller must hold the RTNL semaphore. On a failure
2887 * a negative errno code is returned. On success the reference counts
2888 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2889 * function returns zero.
2890 */
2891int netdev_set_master(struct net_device *slave, struct net_device *master)
2892{
2893 struct net_device *old = slave->master;
2894
2895 ASSERT_RTNL();
2896
2897 if (master) {
2898 if (old)
2899 return -EBUSY;
2900 dev_hold(master);
2901 }
2902
2903 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002904
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905 synchronize_net();
2906
2907 if (old)
2908 dev_put(old);
2909
2910 if (master)
2911 slave->flags |= IFF_SLAVE;
2912 else
2913 slave->flags &= ~IFF_SLAVE;
2914
2915 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2916 return 0;
2917}
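/*
 * Minimal usage sketch (hypothetical bonding-style caller, not taken from a
 * real driver): enslave and release a device through netdev_set_master().
 * The RTNL semaphore is assumed to be held by the caller, as required above.
 */
static int example_enslave(struct net_device *master, struct net_device *slave)
{
	int err;

	ASSERT_RTNL();

	err = netdev_set_master(slave, master);	/* takes a reference on master */
	if (err)
		return err;			/* -EBUSY if already enslaved */
	/* ... bond-specific setup would go here ... */
	return 0;
}

static void example_release_slave(struct net_device *slave)
{
	ASSERT_RTNL();
	netdev_set_master(slave, NULL);	/* drops the reference, clears IFF_SLAVE */
}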
2918
Wang Chendad9b332008-06-18 01:48:28 -07002919static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07002920{
2921 unsigned short old_flags = dev->flags;
2922
Patrick McHardy24023452007-07-14 18:51:31 -07002923 ASSERT_RTNL();
2924
Wang Chendad9b332008-06-18 01:48:28 -07002925 dev->flags |= IFF_PROMISC;
2926 dev->promiscuity += inc;
2927 if (dev->promiscuity == 0) {
2928 /*
2929 * Avoid overflow.
2930 * If inc causes overflow, leave promisc untouched and return an error.
2931 */
2932 if (inc < 0)
2933 dev->flags &= ~IFF_PROMISC;
2934 else {
2935 dev->promiscuity -= inc;
2936 printk(KERN_WARNING "%s: promiscuity touches roof, "
2937 "set promiscuity failed, promiscuity feature "
2938 "of device might be broken.\n", dev->name);
2939 return -EOVERFLOW;
2940 }
2941 }
Patrick McHardy4417da62007-06-27 01:28:10 -07002942 if (dev->flags != old_flags) {
2943 printk(KERN_INFO "device %s %s promiscuous mode\n",
2944 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2945 "left");
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05002946 if (audit_enabled)
2947 audit_log(current->audit_context, GFP_ATOMIC,
2948 AUDIT_ANOM_PROMISCUOUS,
2949 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2950 dev->name, (dev->flags & IFF_PROMISC),
2951 (old_flags & IFF_PROMISC),
2952 audit_get_loginuid(current),
2953 current->uid, current->gid,
2954 audit_get_sessionid(current));
Patrick McHardy24023452007-07-14 18:51:31 -07002955
2956 if (dev->change_rx_flags)
2957 dev->change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07002958 }
Wang Chendad9b332008-06-18 01:48:28 -07002959 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07002960}
2961
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962/**
2963 * dev_set_promiscuity - update promiscuity count on a device
2964 * @dev: device
2965 * @inc: modifier
2966 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07002967 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968 * remains above zero the interface remains promiscuous. Once it hits zero
2969 * the device reverts back to normal filtering operation. A negative inc
2970 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07002971 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 */
Wang Chendad9b332008-06-18 01:48:28 -07002973int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974{
2975 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07002976 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977
Wang Chendad9b332008-06-18 01:48:28 -07002978 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07002979 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07002980 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07002981 if (dev->flags != old_flags)
2982 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07002983 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984}
2985
2986/**
2987 * dev_set_allmulti - update allmulti count on a device
2988 * @dev: device
2989 * @inc: modifier
2990 *
2991 * Add or remove reception of all multicast frames to a device. While the
2992 * count in the device remains above zero the interface remains listening
2993 * to all multicast frames. Once it hits zero the device reverts back to normal
2994 * filtering operation. A negative @inc value is used to drop the counter
2995 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07002996 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997 */
2998
Wang Chendad9b332008-06-18 01:48:28 -07002999int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000{
3001 unsigned short old_flags = dev->flags;
3002
Patrick McHardy24023452007-07-14 18:51:31 -07003003 ASSERT_RTNL();
3004
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003006 dev->allmulti += inc;
3007 if (dev->allmulti == 0) {
3008 /*
3009 * Avoid overflow.
3010 * If inc causes overflow, leave allmulti untouched and return an error.
3011 */
3012 if (inc < 0)
3013 dev->flags &= ~IFF_ALLMULTI;
3014 else {
3015 dev->allmulti -= inc;
3016 printk(KERN_WARNING "%s: allmulti touches roof, "
3017 "set allmulti failed, allmulti feature of "
3018 "device might be broken.\n", dev->name);
3019 return -EOVERFLOW;
3020 }
3021 }
Patrick McHardy24023452007-07-14 18:51:31 -07003022 if (dev->flags ^ old_flags) {
3023 if (dev->change_rx_flags)
3024 dev->change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003025 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003026 }
Wang Chendad9b332008-06-18 01:48:28 -07003027 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003028}
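/*
 * Minimal usage sketch (hypothetical multicast-routing code, RTNL assumed
 * held): allmulti is reference counted exactly like promiscuity above.
 */
static int example_mroute_enable(struct net_device *dev)
{
	return dev_set_allmulti(dev, 1);
}

static void example_mroute_disable(struct net_device *dev)
{
	dev_set_allmulti(dev, -1);
}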
3029
3030/*
3031 * Upload unicast and multicast address lists to device and
3032 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003033 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003034 * are present.
3035 */
3036void __dev_set_rx_mode(struct net_device *dev)
3037{
3038 /* dev_open will call this function so the list will stay sane. */
3039 if (!(dev->flags&IFF_UP))
3040 return;
3041
3042 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003043 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003044
3045 if (dev->set_rx_mode)
3046 dev->set_rx_mode(dev);
3047 else {
3048 /* Unicast address changes may only happen under the rtnl,
3049 * therefore calling __dev_set_promiscuity here is safe.
3050 */
3051 if (dev->uc_count > 0 && !dev->uc_promisc) {
3052 __dev_set_promiscuity(dev, 1);
3053 dev->uc_promisc = 1;
3054 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3055 __dev_set_promiscuity(dev, -1);
3056 dev->uc_promisc = 0;
3057 }
3058
3059 if (dev->set_multicast_list)
3060 dev->set_multicast_list(dev);
3061 }
3062}
3063
3064void dev_set_rx_mode(struct net_device *dev)
3065{
David S. Millerb9e40852008-07-15 00:15:08 -07003066 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003067 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003068 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069}
3070
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003071int __dev_addr_delete(struct dev_addr_list **list, int *count,
3072 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003073{
3074 struct dev_addr_list *da;
3075
3076 for (; (da = *list) != NULL; list = &da->next) {
3077 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3078 alen == da->da_addrlen) {
3079 if (glbl) {
3080 int old_glbl = da->da_gusers;
3081 da->da_gusers = 0;
3082 if (old_glbl == 0)
3083 break;
3084 }
3085 if (--da->da_users)
3086 return 0;
3087
3088 *list = da->next;
3089 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003090 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003091 return 0;
3092 }
3093 }
3094 return -ENOENT;
3095}
3096
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003097int __dev_addr_add(struct dev_addr_list **list, int *count,
3098 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003099{
3100 struct dev_addr_list *da;
3101
3102 for (da = *list; da != NULL; da = da->next) {
3103 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3104 da->da_addrlen == alen) {
3105 if (glbl) {
3106 int old_glbl = da->da_gusers;
3107 da->da_gusers = 1;
3108 if (old_glbl)
3109 return 0;
3110 }
3111 da->da_users++;
3112 return 0;
3113 }
3114 }
3115
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003116 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003117 if (da == NULL)
3118 return -ENOMEM;
3119 memcpy(da->da_addr, addr, alen);
3120 da->da_addrlen = alen;
3121 da->da_users = 1;
3122 da->da_gusers = glbl ? 1 : 0;
3123 da->next = *list;
3124 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003125 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003126 return 0;
3127}
3128
Patrick McHardy4417da62007-06-27 01:28:10 -07003129/**
3130 * dev_unicast_delete - Release secondary unicast address.
3131 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003132 * @addr: address to delete
3133 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003134 *
3135 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003136 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003137 *
3138 * The caller must hold the rtnl_mutex.
3139 */
3140int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3141{
3142 int err;
3143
3144 ASSERT_RTNL();
3145
David S. Millerb9e40852008-07-15 00:15:08 -07003146 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003147 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3148 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003149 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003150 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003151 return err;
3152}
3153EXPORT_SYMBOL(dev_unicast_delete);
3154
3155/**
3156 * dev_unicast_add - add a secondary unicast address
3157 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003158 * @addr: address to add
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003159 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003160 *
3161 * Add a secondary unicast address to the device or increase
3162 * the reference count if it already exists.
3163 *
3164 * The caller must hold the rtnl_mutex.
3165 */
3166int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3167{
3168 int err;
3169
3170 ASSERT_RTNL();
3171
David S. Millerb9e40852008-07-15 00:15:08 -07003172 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003173 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3174 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003175 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003176 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003177 return err;
3178}
3179EXPORT_SYMBOL(dev_unicast_add);
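/*
 * Minimal usage sketch (hypothetical code receiving on one extra MAC
 * address; rtnl_mutex assumed held by the caller as documented above).
 */
static int example_listen_on(struct net_device *dev, const u8 *mac)
{
	return dev_unicast_add(dev, (void *)mac, ETH_ALEN);
}

static void example_stop_listening(struct net_device *dev, const u8 *mac)
{
	dev_unicast_delete(dev, (void *)mac, ETH_ALEN);
}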
3180
Chris Leeche83a2ea2008-01-31 16:53:23 -08003181int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3182 struct dev_addr_list **from, int *from_count)
3183{
3184 struct dev_addr_list *da, *next;
3185 int err = 0;
3186
3187 da = *from;
3188 while (da != NULL) {
3189 next = da->next;
3190 if (!da->da_synced) {
3191 err = __dev_addr_add(to, to_count,
3192 da->da_addr, da->da_addrlen, 0);
3193 if (err < 0)
3194 break;
3195 da->da_synced = 1;
3196 da->da_users++;
3197 } else if (da->da_users == 1) {
3198 __dev_addr_delete(to, to_count,
3199 da->da_addr, da->da_addrlen, 0);
3200 __dev_addr_delete(from, from_count,
3201 da->da_addr, da->da_addrlen, 0);
3202 }
3203 da = next;
3204 }
3205 return err;
3206}
3207
3208void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3209 struct dev_addr_list **from, int *from_count)
3210{
3211 struct dev_addr_list *da, *next;
3212
3213 da = *from;
3214 while (da != NULL) {
3215 next = da->next;
3216 if (da->da_synced) {
3217 __dev_addr_delete(to, to_count,
3218 da->da_addr, da->da_addrlen, 0);
3219 da->da_synced = 0;
3220 __dev_addr_delete(from, from_count,
3221 da->da_addr, da->da_addrlen, 0);
3222 }
3223 da = next;
3224 }
3225}
3226
3227/**
3228 * dev_unicast_sync - Synchronize device's unicast list to another device
3229 * @to: destination device
3230 * @from: source device
3231 *
3232 * Add newly added addresses to the destination device and release
3233 * addresses that have no users left. The source device must be
3234 * locked by netif_addr_lock_bh.
3235 *
3236 * This function is intended to be called from the dev->set_rx_mode
3237 * function of layered software devices.
3238 */
3239int dev_unicast_sync(struct net_device *to, struct net_device *from)
3240{
3241 int err = 0;
3242
David S. Millerb9e40852008-07-15 00:15:08 -07003243 netif_addr_lock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003244 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3245 &from->uc_list, &from->uc_count);
3246 if (!err)
3247 __dev_set_rx_mode(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003248 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003249 return err;
3250}
3251EXPORT_SYMBOL(dev_unicast_sync);
3252
3253/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003254 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003255 * @to: destination device
3256 * @from: source device
3257 *
3258 * Remove all addresses that were added to the destination device by
3259 * dev_unicast_sync(). This function is intended to be called from the
3260 * dev->stop function of layered software devices.
3261 */
3262void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3263{
David S. Millerb9e40852008-07-15 00:15:08 -07003264 netif_addr_lock_bh(from);
David S. Millere308a5d2008-07-15 00:13:44 -07003265 netif_addr_lock(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003266
3267 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3268 &from->uc_list, &from->uc_count);
3269 __dev_set_rx_mode(to);
3270
David S. Millere308a5d2008-07-15 00:13:44 -07003271 netif_addr_unlock(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003272 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003273}
3274EXPORT_SYMBOL(dev_unicast_unsync);
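/*
 * Minimal usage sketch for a layered (vlan/macvlan style) device: sync the
 * upper device's secondary unicast addresses to its lower device from
 * ->set_rx_mode and drop them again in ->stop.  "example_upper_priv" and
 * its lowerdev pointer are assumptions for illustration only.
 */
struct example_upper_priv {
	struct net_device *lowerdev;
};

static void example_upper_set_rx_mode(struct net_device *dev)
{
	struct example_upper_priv *priv = netdev_priv(dev);

	dev_unicast_sync(priv->lowerdev, dev);
}

static int example_upper_stop(struct net_device *dev)
{
	struct example_upper_priv *priv = netdev_priv(dev);

	dev_unicast_unsync(priv->lowerdev, dev);
	return 0;
}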
3275
Denis Cheng12972622007-07-18 02:12:56 -07003276static void __dev_addr_discard(struct dev_addr_list **list)
3277{
3278 struct dev_addr_list *tmp;
3279
3280 while (*list != NULL) {
3281 tmp = *list;
3282 *list = tmp->next;
3283 if (tmp->da_users > tmp->da_gusers)
3284 printk("__dev_addr_discard: address leakage! "
3285 "da_users=%d\n", tmp->da_users);
3286 kfree(tmp);
3287 }
3288}
3289
Denis Cheng26cc2522007-07-18 02:12:03 -07003290static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07003291{
David S. Millerb9e40852008-07-15 00:15:08 -07003292 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07003293
Patrick McHardy4417da62007-06-27 01:28:10 -07003294 __dev_addr_discard(&dev->uc_list);
3295 dev->uc_count = 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003296
Denis Cheng456ad752007-07-18 02:10:54 -07003297 __dev_addr_discard(&dev->mc_list);
3298 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07003299
David S. Millerb9e40852008-07-15 00:15:08 -07003300 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07003301}
3302
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303unsigned dev_get_flags(const struct net_device *dev)
3304{
3305 unsigned flags;
3306
3307 flags = (dev->flags & ~(IFF_PROMISC |
3308 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08003309 IFF_RUNNING |
3310 IFF_LOWER_UP |
3311 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07003312 (dev->gflags & (IFF_PROMISC |
3313 IFF_ALLMULTI));
3314
Stefan Rompfb00055a2006-03-20 17:09:11 -08003315 if (netif_running(dev)) {
3316 if (netif_oper_up(dev))
3317 flags |= IFF_RUNNING;
3318 if (netif_carrier_ok(dev))
3319 flags |= IFF_LOWER_UP;
3320 if (netif_dormant(dev))
3321 flags |= IFF_DORMANT;
3322 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323
3324 return flags;
3325}
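/*
 * Minimal usage sketch: dev_get_flags() folds the RFC2863 operational state
 * into the legacy flag word, so "administratively up and operationally
 * running" can be tested without reading dev->state directly.
 */
static int example_link_is_usable(const struct net_device *dev)
{
	unsigned flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}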
3326
3327int dev_change_flags(struct net_device *dev, unsigned flags)
3328{
Thomas Graf7c355f52007-06-05 16:03:03 -07003329 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 int old_flags = dev->flags;
3331
Patrick McHardy24023452007-07-14 18:51:31 -07003332 ASSERT_RTNL();
3333
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 /*
3335 * Set the flags on our device.
3336 */
3337
3338 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3339 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3340 IFF_AUTOMEDIA)) |
3341 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3342 IFF_ALLMULTI));
3343
3344 /*
3345 * Load in the correct multicast list now the flags have changed.
3346 */
3347
David Woodhouse0e917962008-05-20 14:36:14 -07003348 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
Patrick McHardy24023452007-07-14 18:51:31 -07003349 dev->change_rx_flags(dev, IFF_MULTICAST);
3350
Patrick McHardy4417da62007-06-27 01:28:10 -07003351 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352
3353 /*
3354 * Have we downed the interface? We handle IFF_UP ourselves
3355 * according to user attempts to set it, rather than blindly
3356 * setting it.
3357 */
3358
3359 ret = 0;
3360 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3361 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3362
3363 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07003364 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365 }
3366
3367 if (dev->flags & IFF_UP &&
3368 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3369 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003370 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371
3372 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3373 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3374 dev->gflags ^= IFF_PROMISC;
3375 dev_set_promiscuity(dev, inc);
3376 }
3377
3378 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3379 is important. Some (broken) drivers set IFF_PROMISC when
3380 IFF_ALLMULTI is requested, without asking us and without reporting it.
3381 */
3382 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3383 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3384 dev->gflags ^= IFF_ALLMULTI;
3385 dev_set_allmulti(dev, inc);
3386 }
3387
Thomas Graf7c355f52007-06-05 16:03:03 -07003388 /* Exclude state transition flags, already notified */
3389 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3390 if (changes)
3391 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392
3393 return ret;
3394}
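/*
 * Minimal usage sketch (RTNL assumed held): toggling IFF_UP through
 * dev_change_flags() lets the core handle dev_open()/dev_close(), the
 * promisc/allmulti bookkeeping and the RTM_NEWLINK notification.
 */
static int example_set_admin_state(struct net_device *dev, int up)
{
	unsigned flags = dev_get_flags(dev);

	ASSERT_RTNL();

	if (up)
		flags |= IFF_UP;
	else
		flags &= ~IFF_UP;
	return dev_change_flags(dev, flags);
}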
3395
3396int dev_set_mtu(struct net_device *dev, int new_mtu)
3397{
3398 int err;
3399
3400 if (new_mtu == dev->mtu)
3401 return 0;
3402
3403 /* MTU must be positive. */
3404 if (new_mtu < 0)
3405 return -EINVAL;
3406
3407 if (!netif_device_present(dev))
3408 return -ENODEV;
3409
3410 err = 0;
3411 if (dev->change_mtu)
3412 err = dev->change_mtu(dev, new_mtu);
3413 else
3414 dev->mtu = new_mtu;
3415 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003416 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 return err;
3418}
3419
3420int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3421{
3422 int err;
3423
3424 if (!dev->set_mac_address)
3425 return -EOPNOTSUPP;
3426 if (sa->sa_family != dev->type)
3427 return -EINVAL;
3428 if (!netif_device_present(dev))
3429 return -ENODEV;
3430 err = dev->set_mac_address(dev, sa);
3431 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003432 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433 return err;
3434}
3435
3436/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07003437 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07003439static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440{
3441 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003442 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003443
3444 if (!dev)
3445 return -ENODEV;
3446
3447 switch (cmd) {
3448 case SIOCGIFFLAGS: /* Get interface flags */
3449 ifr->ifr_flags = dev_get_flags(dev);
3450 return 0;
3451
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452 case SIOCGIFMETRIC: /* Get the metric on the interface
3453 (currently unused) */
3454 ifr->ifr_metric = 0;
3455 return 0;
3456
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457 case SIOCGIFMTU: /* Get the MTU of a device */
3458 ifr->ifr_mtu = dev->mtu;
3459 return 0;
3460
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461 case SIOCGIFHWADDR:
3462 if (!dev->addr_len)
3463 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3464 else
3465 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3466 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3467 ifr->ifr_hwaddr.sa_family = dev->type;
3468 return 0;
3469
Jeff Garzik14e3e072007-10-08 00:06:32 -07003470 case SIOCGIFSLAVE:
3471 err = -EINVAL;
3472 break;
3473
3474 case SIOCGIFMAP:
3475 ifr->ifr_map.mem_start = dev->mem_start;
3476 ifr->ifr_map.mem_end = dev->mem_end;
3477 ifr->ifr_map.base_addr = dev->base_addr;
3478 ifr->ifr_map.irq = dev->irq;
3479 ifr->ifr_map.dma = dev->dma;
3480 ifr->ifr_map.port = dev->if_port;
3481 return 0;
3482
3483 case SIOCGIFINDEX:
3484 ifr->ifr_ifindex = dev->ifindex;
3485 return 0;
3486
3487 case SIOCGIFTXQLEN:
3488 ifr->ifr_qlen = dev->tx_queue_len;
3489 return 0;
3490
3491 default:
3492 /* dev_ioctl() should ensure this case
3493 * is never reached
3494 */
3495 WARN_ON(1);
3496 err = -EINVAL;
3497 break;
3498
3499 }
3500 return err;
3501}
3502
3503/*
3504 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3505 */
3506static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3507{
3508 int err;
3509 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3510
3511 if (!dev)
3512 return -ENODEV;
3513
3514 switch (cmd) {
3515 case SIOCSIFFLAGS: /* Set interface flags */
3516 return dev_change_flags(dev, ifr->ifr_flags);
3517
3518 case SIOCSIFMETRIC: /* Set the metric on the interface
3519 (currently unused) */
3520 return -EOPNOTSUPP;
3521
3522 case SIOCSIFMTU: /* Set the MTU of a device */
3523 return dev_set_mtu(dev, ifr->ifr_mtu);
3524
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525 case SIOCSIFHWADDR:
3526 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3527
3528 case SIOCSIFHWBROADCAST:
3529 if (ifr->ifr_hwaddr.sa_family != dev->type)
3530 return -EINVAL;
3531 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3532 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003533 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534 return 0;
3535
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536 case SIOCSIFMAP:
3537 if (dev->set_config) {
3538 if (!netif_device_present(dev))
3539 return -ENODEV;
3540 return dev->set_config(dev, &ifr->ifr_map);
3541 }
3542 return -EOPNOTSUPP;
3543
3544 case SIOCADDMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003545 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003546 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3547 return -EINVAL;
3548 if (!netif_device_present(dev))
3549 return -ENODEV;
3550 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3551 dev->addr_len, 1);
3552
3553 case SIOCDELMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003554 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3556 return -EINVAL;
3557 if (!netif_device_present(dev))
3558 return -ENODEV;
3559 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3560 dev->addr_len, 1);
3561
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562 case SIOCSIFTXQLEN:
3563 if (ifr->ifr_qlen < 0)
3564 return -EINVAL;
3565 dev->tx_queue_len = ifr->ifr_qlen;
3566 return 0;
3567
3568 case SIOCSIFNAME:
3569 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3570 return dev_change_name(dev, ifr->ifr_newname);
3571
3572 /*
3573 * Unknown or private ioctl
3574 */
3575
3576 default:
3577 if ((cmd >= SIOCDEVPRIVATE &&
3578 cmd <= SIOCDEVPRIVATE + 15) ||
3579 cmd == SIOCBONDENSLAVE ||
3580 cmd == SIOCBONDRELEASE ||
3581 cmd == SIOCBONDSETHWADDR ||
3582 cmd == SIOCBONDSLAVEINFOQUERY ||
3583 cmd == SIOCBONDINFOQUERY ||
3584 cmd == SIOCBONDCHANGEACTIVE ||
3585 cmd == SIOCGMIIPHY ||
3586 cmd == SIOCGMIIREG ||
3587 cmd == SIOCSMIIREG ||
3588 cmd == SIOCBRADDIF ||
3589 cmd == SIOCBRDELIF ||
3590 cmd == SIOCWANDEV) {
3591 err = -EOPNOTSUPP;
3592 if (dev->do_ioctl) {
3593 if (netif_device_present(dev))
3594 err = dev->do_ioctl(dev, ifr,
3595 cmd);
3596 else
3597 err = -ENODEV;
3598 }
3599 } else
3600 err = -EINVAL;
3601
3602 }
3603 return err;
3604}
3605
3606/*
3607 * This function handles all "interface"-type I/O control requests. The actual
3608 * 'doing' part of this is dev_ifsioc above.
3609 */
3610
3611/**
3612 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003613 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 * @cmd: command to issue
3615 * @arg: pointer to a struct ifreq in user space
3616 *
3617 * Issue ioctl functions to devices. This is normally called by the
3618 * user space syscall interfaces but can sometimes be useful for
3619 * other purposes. The return value is the return from the syscall if
3620 * positive or a negative errno code on error.
3621 */
3622
Eric W. Biederman881d9662007-09-17 11:56:21 -07003623int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624{
3625 struct ifreq ifr;
3626 int ret;
3627 char *colon;
3628
3629 /* One special case: SIOCGIFCONF takes ifconf argument
3630 and requires shared lock, because it sleeps writing
3631 to user space.
3632 */
3633
3634 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003635 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003636 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003637 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003638 return ret;
3639 }
3640 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003641 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003642
3643 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3644 return -EFAULT;
3645
3646 ifr.ifr_name[IFNAMSIZ-1] = 0;
3647
3648 colon = strchr(ifr.ifr_name, ':');
3649 if (colon)
3650 *colon = 0;
3651
3652 /*
3653 * See which interface the caller is talking about.
3654 */
3655
3656 switch (cmd) {
3657 /*
3658 * These ioctl calls:
3659 * - can be done by all.
3660 * - atomic and do not require locking.
3661 * - return a value
3662 */
3663 case SIOCGIFFLAGS:
3664 case SIOCGIFMETRIC:
3665 case SIOCGIFMTU:
3666 case SIOCGIFHWADDR:
3667 case SIOCGIFSLAVE:
3668 case SIOCGIFMAP:
3669 case SIOCGIFINDEX:
3670 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003671 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672 read_lock(&dev_base_lock);
Jeff Garzik14e3e072007-10-08 00:06:32 -07003673 ret = dev_ifsioc_locked(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674 read_unlock(&dev_base_lock);
3675 if (!ret) {
3676 if (colon)
3677 *colon = ':';
3678 if (copy_to_user(arg, &ifr,
3679 sizeof(struct ifreq)))
3680 ret = -EFAULT;
3681 }
3682 return ret;
3683
3684 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003685 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003687 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688 rtnl_unlock();
3689 if (!ret) {
3690 if (colon)
3691 *colon = ':';
3692 if (copy_to_user(arg, &ifr,
3693 sizeof(struct ifreq)))
3694 ret = -EFAULT;
3695 }
3696 return ret;
3697
3698 /*
3699 * These ioctl calls:
3700 * - require superuser power.
3701 * - require strict serialization.
3702 * - return a value
3703 */
3704 case SIOCGMIIPHY:
3705 case SIOCGMIIREG:
3706 case SIOCSIFNAME:
3707 if (!capable(CAP_NET_ADMIN))
3708 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003709 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003711 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712 rtnl_unlock();
3713 if (!ret) {
3714 if (colon)
3715 *colon = ':';
3716 if (copy_to_user(arg, &ifr,
3717 sizeof(struct ifreq)))
3718 ret = -EFAULT;
3719 }
3720 return ret;
3721
3722 /*
3723 * These ioctl calls:
3724 * - require superuser power.
3725 * - require strict serialization.
3726 * - do not return a value
3727 */
3728 case SIOCSIFFLAGS:
3729 case SIOCSIFMETRIC:
3730 case SIOCSIFMTU:
3731 case SIOCSIFMAP:
3732 case SIOCSIFHWADDR:
3733 case SIOCSIFSLAVE:
3734 case SIOCADDMULTI:
3735 case SIOCDELMULTI:
3736 case SIOCSIFHWBROADCAST:
3737 case SIOCSIFTXQLEN:
3738 case SIOCSMIIREG:
3739 case SIOCBONDENSLAVE:
3740 case SIOCBONDRELEASE:
3741 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742 case SIOCBONDCHANGEACTIVE:
3743 case SIOCBRADDIF:
3744 case SIOCBRDELIF:
3745 if (!capable(CAP_NET_ADMIN))
3746 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08003747 /* fall through */
3748 case SIOCBONDSLAVEINFOQUERY:
3749 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003750 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003752 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753 rtnl_unlock();
3754 return ret;
3755
3756 case SIOCGIFMEM:
3757 /* Get the per device memory space. We can add this but
3758 * currently do not support it */
3759 case SIOCSIFMEM:
3760 /* Set the per device memory buffer space.
3761 * Not applicable in our case */
3762 case SIOCSIFLINK:
3763 return -EINVAL;
3764
3765 /*
3766 * Unknown or private ioctl.
3767 */
3768 default:
3769 if (cmd == SIOCWANDEV ||
3770 (cmd >= SIOCDEVPRIVATE &&
3771 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003772 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003774 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003775 rtnl_unlock();
3776 if (!ret && copy_to_user(arg, &ifr,
3777 sizeof(struct ifreq)))
3778 ret = -EFAULT;
3779 return ret;
3780 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07003782 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003783 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784 return -EINVAL;
3785 }
3786}
3787
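/*
 * Minimal user-space sketch of the request path served above: querying an
 * interface MTU via SIOCGIFMTU on an AF_INET socket.  "eth0" is illustrative
 * only; the block is guarded with #if 0 because it is user-space code, not
 * part of the kernel build.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)	/* handled by dev_ifsioc_locked() */
		printf("mtu=%d\n", ifr.ifr_mtu);
	close(fd);
	return 0;
}
#endif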
3788
3789/**
3790 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003791 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792 *
3793 * Returns a suitable unique value for a new device interface
3794 * number. The caller must hold the rtnl semaphore or the
3795 * dev_base_lock to be sure it remains unique.
3796 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003797static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798{
3799 static int ifindex;
3800 for (;;) {
3801 if (++ifindex <= 0)
3802 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003803 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003804 return ifindex;
3805 }
3806}
3807
Linus Torvalds1da177e2005-04-16 15:20:36 -07003808/* Delayed registration/unregistration */
3809static DEFINE_SPINLOCK(net_todo_list_lock);
Denis Cheng3b5b34f2007-12-07 00:49:17 -08003810static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811
Stephen Hemminger6f05f622007-03-08 20:46:03 -08003812static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813{
3814 spin_lock(&net_todo_list_lock);
3815 list_add_tail(&dev->todo_list, &net_todo_list);
3816 spin_unlock(&net_todo_list_lock);
3817}
3818
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003819static void rollback_registered(struct net_device *dev)
3820{
3821 BUG_ON(dev_boot_phase);
3822 ASSERT_RTNL();
3823
3824 /* Some devices call this without ever having registered, to unwind initialization. */
3825 if (dev->reg_state == NETREG_UNINITIALIZED) {
3826 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3827 "was registered\n", dev->name, dev);
3828
3829 WARN_ON(1);
3830 return;
3831 }
3832
3833 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3834
3835 /* If device is running, close it first. */
3836 dev_close(dev);
3837
3838 /* And unlink it from device chain. */
3839 unlist_netdevice(dev);
3840
3841 dev->reg_state = NETREG_UNREGISTERING;
3842
3843 synchronize_net();
3844
3845 /* Shutdown queueing discipline. */
3846 dev_shutdown(dev);
3847
3848
3849 /* Notify protocols that we are about to destroy
3850 this device. They should clean up all their state.
3851 */
3852 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3853
3854 /*
3855 * Flush the unicast and multicast chains
3856 */
3857 dev_addr_discard(dev);
3858
3859 if (dev->uninit)
3860 dev->uninit(dev);
3861
3862 /* Notifier chain MUST detach us from master device. */
Ilpo Järvinen547b7922008-07-25 21:43:18 -07003863 WARN_ON(dev->master);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003864
3865 /* Remove entries from kobject tree */
3866 netdev_unregister_kobject(dev);
3867
3868 synchronize_net();
3869
3870 dev_put(dev);
3871}
3872
David S. Millere8a04642008-07-17 00:34:19 -07003873static void __netdev_init_queue_locks_one(struct net_device *dev,
3874 struct netdev_queue *dev_queue,
3875 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07003876{
3877 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07003878 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07003879 dev_queue->xmit_lock_owner = -1;
3880}
3881
3882static void netdev_init_queue_locks(struct net_device *dev)
3883{
David S. Millere8a04642008-07-17 00:34:19 -07003884 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3885 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07003886}
3887
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888/**
3889 * register_netdevice - register a network device
3890 * @dev: device to register
3891 *
3892 * Take a completed network device structure and add it to the kernel
3893 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3894 * chain. 0 is returned on success. A negative errno code is returned
3895 * on a failure to set up the device, or if the name is a duplicate.
3896 *
3897 * Callers must hold the rtnl semaphore. You may want
3898 * register_netdev() instead of this.
3899 *
3900 * BUGS:
3901 * The locking appears insufficient to guarantee two parallel registers
3902 * will not get the same name.
3903 */
3904
3905int register_netdevice(struct net_device *dev)
3906{
3907 struct hlist_head *head;
3908 struct hlist_node *p;
3909 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003910 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003911
3912 BUG_ON(dev_boot_phase);
3913 ASSERT_RTNL();
3914
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003915 might_sleep();
3916
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917 /* When net_device's are persistent, this will be fatal. */
3918 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003919 BUG_ON(!dev_net(dev));
3920 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003921
David S. Millerf1f28aa2008-07-15 00:08:33 -07003922 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07003923 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07003924 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925
Linus Torvalds1da177e2005-04-16 15:20:36 -07003926 dev->iflink = -1;
3927
3928 /* Init, if this function is available */
3929 if (dev->init) {
3930 ret = dev->init(dev);
3931 if (ret) {
3932 if (ret > 0)
3933 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08003934 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935 }
3936 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003937
Linus Torvalds1da177e2005-04-16 15:20:36 -07003938 if (!dev_valid_name(dev->name)) {
3939 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003940 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 }
3942
Eric W. Biederman881d9662007-09-17 11:56:21 -07003943 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944 if (dev->iflink == -1)
3945 dev->iflink = dev->ifindex;
3946
3947 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003948 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 hlist_for_each(p, head) {
3950 struct net_device *d
3951 = hlist_entry(p, struct net_device, name_hlist);
3952 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3953 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003954 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003956 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957
Stephen Hemmingerd212f872007-06-27 00:47:37 -07003958 /* Fix illegal checksum combinations */
3959 if ((dev->features & NETIF_F_HW_CSUM) &&
3960 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3961 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3962 dev->name);
3963 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3964 }
3965
3966 if ((dev->features & NETIF_F_NO_CSUM) &&
3967 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3968 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3969 dev->name);
3970 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3971 }
3972
3973
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974 /* Fix illegal SG+CSUM combinations. */
3975 if ((dev->features & NETIF_F_SG) &&
Herbert Xu8648b302006-06-17 22:06:05 -07003976 !(dev->features & NETIF_F_ALL_CSUM)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003977 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 dev->name);
3979 dev->features &= ~NETIF_F_SG;
3980 }
3981
3982 /* TSO requires that SG is present as well. */
3983 if ((dev->features & NETIF_F_TSO) &&
3984 !(dev->features & NETIF_F_SG)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003985 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986 dev->name);
3987 dev->features &= ~NETIF_F_TSO;
3988 }
Ananda Rajue89e9cf2005-10-18 15:46:41 -07003989 if (dev->features & NETIF_F_UFO) {
3990 if (!(dev->features & NETIF_F_HW_CSUM)) {
3991 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3992 "NETIF_F_HW_CSUM feature.\n",
3993 dev->name);
3994 dev->features &= ~NETIF_F_UFO;
3995 }
3996 if (!(dev->features & NETIF_F_SG)) {
3997 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3998 "NETIF_F_SG feature.\n",
3999 dev->name);
4000 dev->features &= ~NETIF_F_UFO;
4001 }
4002 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004003
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004004 /* Enable software GSO if SG is supported. */
4005 if (dev->features & NETIF_F_SG)
4006 dev->features |= NETIF_F_GSO;
4007
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004008 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004009 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004010 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004011 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004012 dev->reg_state = NETREG_REGISTERED;
4013
Linus Torvalds1da177e2005-04-16 15:20:36 -07004014 /*
4015 * Default initial state at registry is that the
4016 * device is present.
4017 */
4018
4019 set_bit(__LINK_STATE_PRESENT, &dev->state);
4020
Linus Torvalds1da177e2005-04-16 15:20:36 -07004021 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004022 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004023 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004024
4025 /* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004026 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004027 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004028 if (ret) {
4029 rollback_registered(dev);
4030 dev->reg_state = NETREG_UNREGISTERED;
4031 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032
4033out:
4034 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004035
4036err_uninit:
4037 if (dev->uninit)
4038 dev->uninit(dev);
4039 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004040}
4041
4042/**
4043 * register_netdev - register a network device
4044 * @dev: device to register
4045 *
4046 * Take a completed network device structure and add it to the kernel
4047 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4048 * chain. 0 is returned on success. A negative errno code is returned
4049 * on a failure to set up the device, or if the name is a duplicate.
4050 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004051 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052 * and expands the device name if you passed a format string to
4053 * alloc_netdev.
4054 */
4055int register_netdev(struct net_device *dev)
4056{
4057 int err;
4058
4059 rtnl_lock();
4060
4061 /*
4062 * If the name is a format string the caller wants us to do a
4063 * name allocation.
4064 */
4065 if (strchr(dev->name, '%')) {
4066 err = dev_alloc_name(dev, dev->name);
4067 if (err < 0)
4068 goto out;
4069 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004070
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071 err = register_netdevice(dev);
4072out:
4073 rtnl_unlock();
4074 return err;
4075}
4076EXPORT_SYMBOL(register_netdev);
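/*
 * Minimal sketch of the usual driver registration flow (the names
 * example_drv_priv, example_setup and the "ex%d" format are illustrative,
 * not from a real driver; ether_setup() comes from <linux/etherdevice.h>).
 */
struct example_drv_priv {
	unsigned long rx_dropped;	/* illustrative private state */
};

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);		/* Ethernet-style defaults */
}

static struct net_device *example_probe(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct example_drv_priv), "ex%d",
			   example_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {	/* also expands the "ex%d" name */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}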
4077
4078/*
4079 * netdev_wait_allrefs - wait until all references are gone.
4080 *
4081 * This is called when unregistering network devices.
4082 *
4083 * Any protocol or device that holds a reference should register
4084 * for netdevice notification, and clean up and put back the
4085 * reference if they receive an UNREGISTER event.
4086 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004087 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088 */
4089static void netdev_wait_allrefs(struct net_device *dev)
4090{
4091 unsigned long rebroadcast_time, warning_time;
4092
4093 rebroadcast_time = warning_time = jiffies;
4094 while (atomic_read(&dev->refcnt) != 0) {
4095 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004096 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004097
4098 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004099 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100
4101 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4102 &dev->state)) {
4103 /* We must not have linkwatch events
4104 * pending on unregister. If this
4105 * happens, we simply run the queue
4106 * unscheduled, resulting in a noop
4107 * for this device.
4108 */
4109 linkwatch_run_queue();
4110 }
4111
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004112 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004113
4114 rebroadcast_time = jiffies;
4115 }
4116
4117 msleep(250);
4118
4119 if (time_after(jiffies, warning_time + 10 * HZ)) {
4120 printk(KERN_EMERG "unregister_netdevice: "
4121 "waiting for %s to become free. Usage "
4122 "count = %d\n",
4123 dev->name, atomic_read(&dev->refcnt));
4124 warning_time = jiffies;
4125 }
4126 }
4127}
4128
4129/* The sequence is:
4130 *
4131 * rtnl_lock();
4132 * ...
4133 * register_netdevice(x1);
4134 * register_netdevice(x2);
4135 * ...
4136 * unregister_netdevice(y1);
4137 * unregister_netdevice(y2);
4138 * ...
4139 * rtnl_unlock();
4140 * free_netdev(y1);
4141 * free_netdev(y2);
4142 *
4143 * We are invoked by rtnl_unlock() after it drops the semaphore.
4144 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004145 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07004146 * without deadlocking with linkwatch via keventd.
4147 * 2) Since we run with the RTNL semaphore not held, we can sleep
4148 * safely in order to wait for the netdev refcnt to drop to zero.
4149 */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004150static DEFINE_MUTEX(net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151void netdev_run_todo(void)
4152{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004153 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154
4155 /* Need to guard against multiple CPUs getting out of order. */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004156 mutex_lock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157
4158 /* Not safe to do outside the semaphore. We must not return
4159 * until all unregister events invoked by the local processor
4160 * have been completed (either by this todo run, or one on
4161 * another cpu).
4162 */
4163 if (list_empty(&net_todo_list))
4164 goto out;
4165
4166 /* Snapshot list, allow later requests */
4167 spin_lock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004168 list_replace_init(&net_todo_list, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169 spin_unlock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004170
Linus Torvalds1da177e2005-04-16 15:20:36 -07004171 while (!list_empty(&list)) {
4172 struct net_device *dev
4173 = list_entry(list.next, struct net_device, todo_list);
4174 list_del(&dev->todo_list);
4175
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004176 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004177 printk(KERN_ERR "network todo '%s' but state %d\n",
4178 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004179 dump_stack();
4180 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004182
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004183 dev->reg_state = NETREG_UNREGISTERED;
4184
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004185 on_each_cpu(flush_backlog, dev, 1);
4186
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004187 netdev_wait_allrefs(dev);
4188
4189 /* paranoia */
4190 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004191 WARN_ON(dev->ip_ptr);
4192 WARN_ON(dev->ip6_ptr);
4193 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004194
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004195 if (dev->destructor)
4196 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07004197
4198 /* Free network device */
4199 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200 }
4201
4202out:
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004203 mutex_unlock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204}
4205
Rusty Russell5a1b5892007-04-28 21:04:03 -07004206static struct net_device_stats *internal_stats(struct net_device *dev)
Rusty Russellc45d2862007-03-28 14:29:08 -07004207{
Rusty Russell5a1b5892007-04-28 21:04:03 -07004208 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07004209}
4210
David S. Millerdc2b4842008-07-08 17:18:23 -07004211static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07004212 struct netdev_queue *queue,
4213 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07004214{
David S. Millerdc2b4842008-07-08 17:18:23 -07004215 queue->dev = dev;
4216}
4217
David S. Millerbb949fb2008-07-08 16:55:56 -07004218static void netdev_init_queues(struct net_device *dev)
4219{
David S. Millere8a04642008-07-17 00:34:19 -07004220 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4221 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07004222 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07004223}
4224
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004226 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227 * @sizeof_priv: size of private data to allocate space for
4228 * @name: device name format string
4229 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004230 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231 *
4232 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004233 * and performs basic initialization. Also allocates subqueue structs
4234 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004235 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004236struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4237 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238{
David S. Millere8a04642008-07-17 00:34:19 -07004239 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004240 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07004241 size_t alloc_size;
David S. Millere8a04642008-07-17 00:34:19 -07004242 void *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004243
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004244 BUG_ON(strlen(name) >= sizeof(dev->name));
4245
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004246 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07004247 if (sizeof_priv) {
4248 /* ensure 32-byte alignment of private area */
4249 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4250 alloc_size += sizeof_priv;
4251 }
4252 /* ensure 32-byte alignment of whole construct */
4253 alloc_size += NETDEV_ALIGN_CONST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07004255 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004257 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258 return NULL;
4259 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004260
Stephen Hemminger79439862008-07-21 13:28:44 -07004261 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07004262 if (!tx) {
4263 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4264 "tx qdiscs.\n");
4265 kfree(p);
4266 return NULL;
4267 }
4268
Linus Torvalds1da177e2005-04-16 15:20:36 -07004269 dev = (struct net_device *)
4270 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4271 dev->padded = (char *)dev - (char *)p;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004272 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273
David S. Millere8a04642008-07-17 00:34:19 -07004274 dev->_tx = tx;
4275 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004276 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07004277
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004278 if (sizeof_priv) {
4279 dev->priv = ((char *)dev +
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004280 ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004281 & ~NETDEV_ALIGN_CONST));
4282 }
4283
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07004284 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285
David S. Millerbb949fb2008-07-08 16:55:56 -07004286 netdev_init_queues(dev);
4287
Rusty Russell5a1b5892007-04-28 21:04:03 -07004288 dev->get_stats = internal_stats;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004289 netpoll_netdev_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290 setup(dev);
4291 strcpy(dev->name, name);
4292 return dev;
4293}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004294EXPORT_SYMBOL(alloc_netdev_mq);
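/*
 * Minimal multiqueue sketch: the same hypothetical driver as above, but with
 * four TX subqueues.  The private area reserved by sizeof_priv is reached
 * through netdev_priv(); it is already zeroed by the kzalloc() above.
 */
static struct net_device *example_probe_mq(void)
{
	struct example_drv_priv *priv;
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct example_drv_priv), "exmq%d",
			      example_setup, 4);
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);
	priv->rx_dropped = 0;
	return dev;
}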
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295
4296/**
4297 * free_netdev - free network device
4298 * @dev: device
4299 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004300 * This function does the last stage of destroying an allocated device
4301 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302 * If this is the last reference then it will be freed.
4303 */
4304void free_netdev(struct net_device *dev)
4305{
Denis V. Lunevf3005d72008-04-16 02:02:18 -07004306 release_net(dev_net(dev));
4307
David S. Millere8a04642008-07-17 00:34:19 -07004308 kfree(dev->_tx);
4309
Stephen Hemminger3041a062006-05-26 13:25:24 -07004310 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 if (dev->reg_state == NETREG_UNINITIALIZED) {
4312 kfree((char *)dev - dev->padded);
4313 return;
4314 }
4315
4316 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4317 dev->reg_state = NETREG_RELEASED;
4318
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07004319 /* will free via device release */
4320 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321}
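
/*
 * Illustrative sketch, not part of this file: the NETREG_UNINITIALIZED
 * branch above is what lets a driver's probe error path free a device
 * that was never (or unsuccessfully) registered.
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */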
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004322
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323/* Synchronize with packet receive processing. */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004324void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325{
4326 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07004327 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328}
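
/*
 * Illustrative sketch, not part of this file: a typical caller unhooks
 * its packet handler and then waits for in-flight receive processing
 * before freeing the handler's state.  foo_packet_type and foo_state
 * are hypothetical names.
 *
 *	dev_remove_pack(&foo_packet_type);
 *	synchronize_net();
 *	kfree(foo_state);
 */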
4329
4330/**
4331 * unregister_netdevice - remove device from the kernel
4332 * @dev: device
4333 *
4334 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004335 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004336 *
4337 * Callers must hold the rtnl semaphore. You may want
4338 * unregister_netdev() instead of this.
4339 */
4340
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08004341void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342{
Herbert Xua6620712007-12-12 19:21:56 -08004343 ASSERT_RTNL();
4344
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004345 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004346 /* Finish processing unregister after unlock */
4347 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348}
4349
4350/**
4351 * unregister_netdev - remove device from the kernel
4352 * @dev: device
4353 *
4354 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004355 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356 *
4357 * This is just a wrapper for unregister_netdevice that takes
4358 * the rtnl semaphore. In general you want to use this and not
4359 * unregister_netdevice.
4360 */
4361void unregister_netdev(struct net_device *dev)
4362{
4363 rtnl_lock();
4364 unregister_netdevice(dev);
4365 rtnl_unlock();
4366}
4367
4368EXPORT_SYMBOL(unregister_netdev);
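
/*
 * Illustrative sketch, not part of this file: module teardown normally
 * uses the rtnl-taking wrapper and frees the device afterwards.  The
 * foo_* names are hypothetical.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_netdev(foo_dev);
 *		free_netdev(foo_dev);
 *	}
 */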
4369
Eric W. Biedermance286d32007-09-12 13:53:49 +02004370/**
 4371 * dev_change_net_namespace - move device to a different network namespace
4372 * @dev: device
4373 * @net: network namespace
 4374 * @pat: If not NULL, name pattern to try if the current device name
4375 * is already taken in the destination network namespace.
4376 *
4377 * This function shuts down a device interface and moves it
4378 * to a new network namespace. On success 0 is returned, on
 4379 * a failure a negative errno code is returned.
4380 *
4381 * Callers must hold the rtnl semaphore.
4382 */
4383
4384int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4385{
4386 char buf[IFNAMSIZ];
4387 const char *destname;
4388 int err;
4389
4390 ASSERT_RTNL();
4391
4392 /* Don't allow namespace local devices to be moved. */
4393 err = -EINVAL;
4394 if (dev->features & NETIF_F_NETNS_LOCAL)
4395 goto out;
4396
 4397	/* Ensure the device has been registered */
4398 err = -EINVAL;
4399 if (dev->reg_state != NETREG_REGISTERED)
4400 goto out;
4401
 4402	/* Get out if there is nothing to do */
4403 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09004404 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02004405 goto out;
4406
4407 /* Pick the destination device name, and ensure
4408 * we can use it in the destination network namespace.
4409 */
4410 err = -EEXIST;
4411 destname = dev->name;
4412 if (__dev_get_by_name(net, destname)) {
4413 /* We get here if we can't use the current device name */
4414 if (!pat)
4415 goto out;
4416 if (!dev_valid_name(pat))
4417 goto out;
4418 if (strchr(pat, '%')) {
4419 if (__dev_alloc_name(net, pat, buf) < 0)
4420 goto out;
4421 destname = buf;
4422 } else
4423 destname = pat;
4424 if (__dev_get_by_name(net, destname))
4425 goto out;
4426 }
4427
4428 /*
 4429	 * And now a mini version of register_netdevice and unregister_netdevice.
4430 */
4431
4432 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07004433 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004434
4435 /* And unlink it from device chain */
4436 err = -ENODEV;
4437 unlist_netdevice(dev);
4438
4439 synchronize_net();
4440
4441 /* Shutdown queueing discipline. */
4442 dev_shutdown(dev);
4443
 4444	/* Notify protocols that we are about to destroy
4445 this device. They should clean all the things.
4446 */
4447 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4448
4449 /*
4450 * Flush the unicast and multicast chains
4451 */
4452 dev_addr_discard(dev);
4453
4454 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004455 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004456
4457 /* Assign the new device name */
4458 if (destname != dev->name)
4459 strcpy(dev->name, destname);
4460
4461 /* If there is an ifindex conflict assign a new one */
4462 if (__dev_get_by_index(net, dev->ifindex)) {
4463 int iflink = (dev->iflink == dev->ifindex);
4464 dev->ifindex = dev_new_index(net);
4465 if (iflink)
4466 dev->iflink = dev->ifindex;
4467 }
4468
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004469 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004470 netdev_unregister_kobject(dev);
4471 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004472 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004473
4474 /* Add the device back in the hashes */
4475 list_netdevice(dev);
4476
4477 /* Notify protocols, that a new device appeared. */
4478 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4479
4480 synchronize_net();
4481 err = 0;
4482out:
4483 return err;
4484}
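
/*
 * Illustrative sketch, not part of this file: callers take the rtnl
 * lock themselves and may pass a '%'-style pattern as a fallback name
 * for the destination namespace.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, net, "eth%d");
 *	rtnl_unlock();
 */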
4485
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486static int dev_cpu_callback(struct notifier_block *nfb,
4487 unsigned long action,
4488 void *ocpu)
4489{
4490 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07004491 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492 struct sk_buff *skb;
4493 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4494 struct softnet_data *sd, *oldsd;
4495
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07004496 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497 return NOTIFY_OK;
4498
4499 local_irq_disable();
4500 cpu = smp_processor_id();
4501 sd = &per_cpu(softnet_data, cpu);
4502 oldsd = &per_cpu(softnet_data, oldcpu);
4503
4504 /* Find end of our completion_queue. */
4505 list_skb = &sd->completion_queue;
4506 while (*list_skb)
4507 list_skb = &(*list_skb)->next;
4508 /* Append completion queue from offline CPU. */
4509 *list_skb = oldsd->completion_queue;
4510 oldsd->completion_queue = NULL;
4511
4512 /* Find end of our output_queue. */
4513 list_net = &sd->output_queue;
4514 while (*list_net)
4515 list_net = &(*list_net)->next_sched;
4516 /* Append output queue from offline CPU. */
4517 *list_net = oldsd->output_queue;
4518 oldsd->output_queue = NULL;
4519
4520 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4521 local_irq_enable();
4522
4523 /* Process offline CPU's input_pkt_queue */
4524 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4525 netif_rx(skb);
4526
4527 return NOTIFY_OK;
4528}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004529
Chris Leechdb217332006-06-17 21:24:58 -07004530#ifdef CONFIG_NET_DMA
4531/**
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004532 * net_dma_rebalance - try to maintain one DMA channel per CPU
4533 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4534 *
4535 * This is called when the number of channels allocated to the net_dma client
4536 * changes. The net_dma client tries to have one DMA channel per CPU.
Chris Leechdb217332006-06-17 21:24:58 -07004537 */
Dan Williamsd379b012007-07-09 11:56:42 -07004538
4539static void net_dma_rebalance(struct net_dma *net_dma)
Chris Leechdb217332006-06-17 21:24:58 -07004540{
Dan Williamsd379b012007-07-09 11:56:42 -07004541 unsigned int cpu, i, n, chan_idx;
Chris Leechdb217332006-06-17 21:24:58 -07004542 struct dma_chan *chan;
4543
Dan Williamsd379b012007-07-09 11:56:42 -07004544 if (cpus_empty(net_dma->channel_mask)) {
Chris Leechdb217332006-06-17 21:24:58 -07004545 for_each_online_cpu(cpu)
Alexey Dobriyan29bbd722006-08-02 15:02:31 -07004546 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
Chris Leechdb217332006-06-17 21:24:58 -07004547 return;
4548 }
4549
4550 i = 0;
4551 cpu = first_cpu(cpu_online_map);
4552
Mike Travis0e12f842008-05-12 21:21:13 +02004553 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
Dan Williamsd379b012007-07-09 11:56:42 -07004554 chan = net_dma->channels[chan_idx];
4555
4556 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4557 + (i < (num_online_cpus() %
4558 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
Chris Leechdb217332006-06-17 21:24:58 -07004559
 4560		while (n) {
Alexey Dobriyan29bbd722006-08-02 15:02:31 -07004561 per_cpu(softnet_data, cpu).net_dma = chan;
Chris Leechdb217332006-06-17 21:24:58 -07004562 cpu = next_cpu(cpu, cpu_online_map);
4563 n--;
4564 }
4565 i++;
4566 }
Chris Leechdb217332006-06-17 21:24:58 -07004567}
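
/*
 * Worked example of the split computed above (illustrative only): with
 * 8 online CPUs and 3 channels in channel_mask, 8 / 3 = 2 with a
 * remainder of 2, so the first two channels each serve 3 CPUs and the
 * third serves 2.
 */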
4568
4569/**
4570 * netdev_dma_event - event callback for the net_dma_client
4571 * @client: should always be net_dma_client
Randy Dunlapf4b8ea72006-06-22 16:00:11 -07004572 * @chan: DMA channel for the event
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004573 * @state: DMA state to be handled
Chris Leechdb217332006-06-17 21:24:58 -07004574 */
Dan Williamsd379b012007-07-09 11:56:42 -07004575static enum dma_state_client
4576netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4577 enum dma_state state)
Chris Leechdb217332006-06-17 21:24:58 -07004578{
Dan Williamsd379b012007-07-09 11:56:42 -07004579 int i, found = 0, pos = -1;
4580 struct net_dma *net_dma =
4581 container_of(client, struct net_dma, client);
4582 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4583
4584 spin_lock(&net_dma->lock);
4585 switch (state) {
4586 case DMA_RESOURCE_AVAILABLE:
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004587 for (i = 0; i < nr_cpu_ids; i++)
Dan Williamsd379b012007-07-09 11:56:42 -07004588 if (net_dma->channels[i] == chan) {
4589 found = 1;
4590 break;
4591 } else if (net_dma->channels[i] == NULL && pos < 0)
4592 pos = i;
4593
4594 if (!found && pos >= 0) {
4595 ack = DMA_ACK;
4596 net_dma->channels[pos] = chan;
4597 cpu_set(pos, net_dma->channel_mask);
4598 net_dma_rebalance(net_dma);
4599 }
Chris Leechdb217332006-06-17 21:24:58 -07004600 break;
4601 case DMA_RESOURCE_REMOVED:
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004602 for (i = 0; i < nr_cpu_ids; i++)
Dan Williamsd379b012007-07-09 11:56:42 -07004603 if (net_dma->channels[i] == chan) {
4604 found = 1;
4605 pos = i;
4606 break;
4607 }
4608
4609 if (found) {
4610 ack = DMA_ACK;
4611 cpu_clear(pos, net_dma->channel_mask);
4612 net_dma->channels[i] = NULL;
4613 net_dma_rebalance(net_dma);
4614 }
Chris Leechdb217332006-06-17 21:24:58 -07004615 break;
4616 default:
4617 break;
4618 }
Dan Williamsd379b012007-07-09 11:56:42 -07004619 spin_unlock(&net_dma->lock);
4620
4621 return ack;
Chris Leechdb217332006-06-17 21:24:58 -07004622}
4623
4624/**
 4625 * netdev_dma_register - register the networking subsystem as a DMA client
4626 */
4627static int __init netdev_dma_register(void)
4628{
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004629 net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
4630 GFP_KERNEL);
4631 if (unlikely(!net_dma.channels)) {
4632 printk(KERN_NOTICE
4633 "netdev_dma: no memory for net_dma.channels\n");
4634 return -ENOMEM;
4635 }
Dan Williamsd379b012007-07-09 11:56:42 -07004636 spin_lock_init(&net_dma.lock);
4637 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4638 dma_async_client_register(&net_dma.client);
4639 dma_async_client_chan_request(&net_dma.client);
Chris Leechdb217332006-06-17 21:24:58 -07004640 return 0;
4641}
4642
4643#else
4644static int __init netdev_dma_register(void) { return -ENODEV; }
4645#endif /* CONFIG_NET_DMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004646
Herbert Xu7f353bf2007-08-10 15:47:58 -07004647/**
 4648 * netdev_compute_features - compute conjunction of two feature sets
 4649 * @all: current feature set of the master device
 4650 * @one: feature set of the device being added
4651 *
4652 * Computes a new feature set after adding a device with feature set
4653 * @one to the master device with current feature set @all. Returns
4654 * the new feature set.
4655 */
4656int netdev_compute_features(unsigned long all, unsigned long one)
4657{
4658 /* if device needs checksumming, downgrade to hw checksumming */
4659 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4660 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4661
4662 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4663 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4664 all ^= NETIF_F_HW_CSUM
4665 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4666
4667 if (one & NETIF_F_GSO)
4668 one |= NETIF_F_GSO_SOFTWARE;
4669 one |= NETIF_F_GSO;
4670
4671 /* If even one device supports robust GSO, enable it for all. */
4672 if (one & NETIF_F_GSO_ROBUST)
4673 all |= NETIF_F_GSO_ROBUST;
4674
4675 all &= one | NETIF_F_LLTX;
4676
4677 if (!(all & NETIF_F_ALL_CSUM))
4678 all &= ~NETIF_F_SG;
4679 if (!(all & NETIF_F_SG))
4680 all &= ~NETIF_F_GSO_MASK;
4681
4682 return all;
4683}
4684EXPORT_SYMBOL(netdev_compute_features);
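
/*
 * Illustrative sketch, not part of this file: a master device such as a
 * bond or bridge folds in each slave's feature set one at a time.  The
 * starting mask and the slave list shown here are hypothetical.
 *
 *	unsigned long features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO;
 *
 *	list_for_each_entry(slave, &master_list, list)
 *		features = netdev_compute_features(features,
 *						   slave->dev->features);
 *	master->features = features;
 */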
4685
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004686static struct hlist_head *netdev_create_hash(void)
4687{
4688 int i;
4689 struct hlist_head *hash;
4690
4691 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4692 if (hash != NULL)
4693 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4694 INIT_HLIST_HEAD(&hash[i]);
4695
4696 return hash;
4697}
4698
Eric W. Biederman881d9662007-09-17 11:56:21 -07004699/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07004700static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004701{
Eric W. Biederman881d9662007-09-17 11:56:21 -07004702 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07004703
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004704 net->dev_name_head = netdev_create_hash();
4705 if (net->dev_name_head == NULL)
4706 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004707
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004708 net->dev_index_head = netdev_create_hash();
4709 if (net->dev_index_head == NULL)
4710 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004711
4712 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004713
4714err_idx:
4715 kfree(net->dev_name_head);
4716err_name:
4717 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004718}
4719
Arjan van de Ven6579e572008-07-21 13:31:48 -07004720char *netdev_drivername(struct net_device *dev, char *buffer, int len)
4721{
4722 struct device_driver *driver;
4723 struct device *parent;
4724
4725 if (len <= 0 || !buffer)
4726 return buffer;
4727 buffer[0] = 0;
4728
4729 parent = dev->dev.parent;
4730
4731 if (!parent)
4732 return buffer;
4733
4734 driver = parent->driver;
4735 if (driver && driver->name)
4736 strlcpy(buffer, driver->name, len);
4737 return buffer;
4738}
4739
Pavel Emelyanov46650792007-10-08 20:38:39 -07004740static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004741{
4742 kfree(net->dev_name_head);
4743 kfree(net->dev_index_head);
4744}
4745
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004746static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004747 .init = netdev_init,
4748 .exit = netdev_exit,
4749};
4750
Pavel Emelyanov46650792007-10-08 20:38:39 -07004751static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02004752{
4753 struct net_device *dev, *next;
4754 /*
 4755	 * Push all migratable network devices back to the
4756 * initial network namespace
4757 */
4758 rtnl_lock();
4759 for_each_netdev_safe(net, dev, next) {
4760 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004761 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02004762
 4763		/* Ignore unmovable devices (e.g. loopback) */
4764 if (dev->features & NETIF_F_NETNS_LOCAL)
4765 continue;
4766
 4767		/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004768 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
4769 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004770 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004771 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02004772 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004773 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02004774 }
4775 }
4776 rtnl_unlock();
4777}
4778
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004779static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02004780 .exit = default_device_exit,
4781};
4782
Linus Torvalds1da177e2005-04-16 15:20:36 -07004783/*
4784 * Initialize the DEV module. At boot time this walks the device list and
4785 * unhooks any devices that fail to initialise (normally hardware not
4786 * present) and leaves us with a valid list of present and active devices.
4787 *
4788 */
4789
4790/*
 4791 * This is called single-threaded during boot, so no need
4792 * to take the rtnl semaphore.
4793 */
4794static int __init net_dev_init(void)
4795{
4796 int i, rc = -ENOMEM;
4797
4798 BUG_ON(!dev_boot_phase);
4799
Linus Torvalds1da177e2005-04-16 15:20:36 -07004800 if (dev_proc_init())
4801 goto out;
4802
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004803 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804 goto out;
4805
4806 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004807 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004808 INIT_LIST_HEAD(&ptype_base[i]);
4809
Eric W. Biederman881d9662007-09-17 11:56:21 -07004810 if (register_pernet_subsys(&netdev_net_ops))
4811 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004812
Eric W. Biedermance286d32007-09-12 13:53:49 +02004813 if (register_pernet_device(&default_device_ops))
4814 goto out;
4815
Linus Torvalds1da177e2005-04-16 15:20:36 -07004816 /*
4817 * Initialise the packet receive queues.
4818 */
4819
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07004820 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004821 struct softnet_data *queue;
4822
4823 queue = &per_cpu(softnet_data, i);
4824 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004825 queue->completion_queue = NULL;
4826 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004827
4828 queue->backlog.poll = process_backlog;
4829 queue->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004830 }
4831
Chris Leechdb217332006-06-17 21:24:58 -07004832 netdev_dma_register();
4833
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834 dev_boot_phase = 0;
4835
Carlos R. Mafra962cf362008-05-15 11:15:37 -03004836 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
4837 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004838
4839 hotcpu_notifier(dev_cpu_callback, 0);
4840 dst_init();
4841 dev_mcast_init();
4842 rc = 0;
4843out:
4844 return rc;
4845}
4846
4847subsys_initcall(net_dev_init);
4848
4849EXPORT_SYMBOL(__dev_get_by_index);
4850EXPORT_SYMBOL(__dev_get_by_name);
4851EXPORT_SYMBOL(__dev_remove_pack);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08004852EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004853EXPORT_SYMBOL(dev_add_pack);
4854EXPORT_SYMBOL(dev_alloc_name);
4855EXPORT_SYMBOL(dev_close);
4856EXPORT_SYMBOL(dev_get_by_flags);
4857EXPORT_SYMBOL(dev_get_by_index);
4858EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004859EXPORT_SYMBOL(dev_open);
4860EXPORT_SYMBOL(dev_queue_xmit);
4861EXPORT_SYMBOL(dev_remove_pack);
4862EXPORT_SYMBOL(dev_set_allmulti);
4863EXPORT_SYMBOL(dev_set_promiscuity);
4864EXPORT_SYMBOL(dev_change_flags);
4865EXPORT_SYMBOL(dev_set_mtu);
4866EXPORT_SYMBOL(dev_set_mac_address);
4867EXPORT_SYMBOL(free_netdev);
4868EXPORT_SYMBOL(netdev_boot_setup_check);
4869EXPORT_SYMBOL(netdev_set_master);
4870EXPORT_SYMBOL(netdev_state_change);
4871EXPORT_SYMBOL(netif_receive_skb);
4872EXPORT_SYMBOL(netif_rx);
4873EXPORT_SYMBOL(register_gifconf);
4874EXPORT_SYMBOL(register_netdevice);
4875EXPORT_SYMBOL(register_netdevice_notifier);
4876EXPORT_SYMBOL(skb_checksum_help);
4877EXPORT_SYMBOL(synchronize_net);
4878EXPORT_SYMBOL(unregister_netdevice);
4879EXPORT_SYMBOL(unregister_netdevice_notifier);
4880EXPORT_SYMBOL(net_enable_timestamp);
4881EXPORT_SYMBOL(net_disable_timestamp);
4882EXPORT_SYMBOL(dev_get_flags);
4883
4884#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4885EXPORT_SYMBOL(br_handle_frame_hook);
4886EXPORT_SYMBOL(br_fdb_get_hook);
4887EXPORT_SYMBOL(br_fdb_put_hook);
4888#endif
4889
4890#ifdef CONFIG_KMOD
4891EXPORT_SYMBOL(dev_load);
4892#endif
4893
4894EXPORT_PER_CPU_SYMBOL(softnet_data);