/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */
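
/*
 * Worked example of the hashing below: the bucket is the low nibble of
 * the protocol number, ntohs(type) & 15, so RARP (0x8035), SNAP (0x0005)
 * and X.25 (0x0805) all hash to 5 -- the one collision noted above.
 */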

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[16] __read_mostly;	/* 16 way hashed list */
static struct list_head ptype_all __read_mostly;	/* Taps */

#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client client;
	spinlock_t lock;
	cpumask_t channel_mask;
	struct dma_chan *channels[NR_CPUS];
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);
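
/*
 * A minimal reader-side sketch of the scheme above (hypothetical caller:
 * "net" is the namespace being walked and do_something() is an assumed
 * helper that only inspects the device and does not sleep):
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		do_something(dev);
 *	read_unlock(&dev_base_lock);
 */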

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
extern void netdev_unregister_sysfs(struct net_device *);
#else
#define netdev_sysfs_init()		(0)
#define netdev_register_sysfs(dev)	(0)
#define netdev_unregister_sysfs(dev)	do { } while(0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits dev->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and must be copied-on-write; it would change the clone and
 *	subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & 15;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
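
/*
 * Usage sketch for dev_add_pack() (hypothetical module code; my_tap and
 * my_tap_rcv are assumed names, not part of this file):
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		// the handler owns this reference to the skb
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_tap = {
 *		.type = __constant_htons(ETH_P_ALL),	// tap every protocol
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);	// ETH_P_ALL handlers go on ptype_all
 */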

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & 15];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strcpy(s[i].name, name);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves boot-time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

/*******************************************************************************

				Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
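
/*
 * Usage sketch for dev_get_by_name() (hypothetical caller): the reference
 * taken above must be released with dev_put() once the device is no
 * longer needed.
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *
 *	if (dev) {
 *		// ... use dev ...
 *		dev_put(dev);
 *	}
 */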

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string, e.g. "lt%d", it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	int i = 0;
	char buf[IFNAMSIZ];
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	long *inuse;
	struct net_device *d;
	struct net *net;

	BUG_ON(!dev->nd_net);
	net = dev->nd_net;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be exactly one "%d" and no other
		 * "%" characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, sizeof(buf), name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, sizeof(buf), name, i);
	if (!__dev_get_by_name(net, buf)) {
		strlcpy(dev->name, buf, IFNAMSIZ);
		return i;
	}

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
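
/*
 * Worked example: with eth0 and eth2 already registered, calling
 * dev_alloc_name(dev, "eth%d") sets bits 0 and 2 in the in-use map, so
 * find_first_zero_bit() yields 1, dev->name becomes "eth1" and the
 * function returns 1.
 */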

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; format strings such as "eth%d"
 *	can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev->nd_net);

	net = dev->nd_net;
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	device_rename(&dev->dev, dev->name);

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = raw_notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		raw_notifier_call_chain(&netdev_chain,
				NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

/**
 *	dev_load	- load a network module
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

static int default_rebuild_header(struct sk_buff *skb)
{
	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
	       skb->dev ? skb->dev->name : "NULL!!!");
	kfree_skb(skb);
	return 1;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);
	if (dev->open) {
		ret = dev->open(dev);
		if (ret)
			clear_bit(__LINK_STATE_START, &dev->state);
	}

	/*
	 *	If it went open OK then:
	 */

	if (!ret) {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return ret;
}
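
/*
 * Usage sketch for dev_open() (hypothetical caller; assumes, as its
 * in-tree callers do, that the rtnl semaphore is held around the call):
 *
 *	rtnl_lock();
 *	err = dev_open(dev);	// 0, or a negative errno from dev->open()
 *	rtnl_unlock();
 */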

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);

	dev_deactivate(dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch the poll list,
	 * it can be even on a different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	return 0;
}


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
	goto unlock;
}
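
/*
 * Usage sketch for register_netdevice_notifier() (hypothetical module;
 * my_netdev_event and my_netdev_nb are assumed names):
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 *
 * Because registration events are replayed, the handler also sees a
 * NETDEV_REGISTER (and NETDEV_UP where applicable) for every device
 * that already exists at registration time.
 */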

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@v:   pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&netdev_chain, val, v);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		unsigned long flags;
		struct softnet_data *sd;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		dev->next_sched = sd->output_queue;
		sd->output_queue = dev;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);


/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	if (skb_cloned(skb)) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset > (int)skb->len);
	csum = skb_checksum(skb, offset, skb->len-offset, 0);

	offset = skb_headlen(skb) - offset;
	BUG_ON(offset <= 0);
	BUG_ON(skb->csum_offset + 2 > offset);

	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) =
		csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	BUG_ON(skb_shinfo(skb)->frag_list);

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);
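
/*
 * Usage sketch for skb_gso_segment() (hypothetical caller, mirroring the
 * loop in dev_hard_start_xmit() below; xmit_one() is an assumed helper):
 * the segments are linked through skb->next and each one must be
 * transmitted or freed individually.
 *
 *	segs = skb_gso_segment(skb, dev->features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	while (segs) {
 *		struct sk_buff *nskb = segs->next;
 *
 *		segs->next = NULL;
 *		xmit_one(segs);
 *		segs = nskb;
 *	}
 */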

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows mapping all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (unlikely(IS_ERR(segs)))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return dev->hard_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely((netif_queue_stopped(dev) ||
			     netif_subqueue_stopped(dev, skb->queue_mapping)) &&
			     skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}

#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		netif_tx_lock(dev);			\
	}						\
}

#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		netif_tx_unlock(dev);			\
	}						\
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */

int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct Qdisc *q;
	int rc = -ENOMEM;
1568
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001569 /* GSO will handle the following emulations directly. */
1570 if (netif_needs_gso(dev, skb))
1571 goto gso;
1572
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 if (skb_shinfo(skb)->frag_list &&
1574 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001575 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 goto out_kfree_skb;
1577
1578 /* A fragmented skb is linearized if the device does not support SG,
1579 * or if at least one of the fragments is in highmem and the device
1580 * does not support DMA from it.
1581 */
1582 if (skb_shinfo(skb)->nr_frags &&
1583 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001584 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 goto out_kfree_skb;
1586
1587 /* If packet is not checksummed and device does not support
1588 * checksumming for this protocol, complete checksumming here.
1589 */
Herbert Xu663ead32007-04-09 11:59:07 -07001590 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1591 skb_set_transport_header(skb, skb->csum_start -
1592 skb_headroom(skb));
1593
Herbert Xua2988302007-06-28 13:44:37 -07001594 if (!(dev->features & NETIF_F_GEN_CSUM) &&
1595 !((dev->features & NETIF_F_IP_CSUM) &&
1596 skb->protocol == htons(ETH_P_IP)) &&
1597 !((dev->features & NETIF_F_IPV6_CSUM) &&
1598 skb->protocol == htons(ETH_P_IPV6)))
Herbert Xu663ead32007-04-09 11:59:07 -07001599 if (skb_checksum_help(skb))
1600 goto out_kfree_skb;
1601 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001603gso:
Eric Dumazet2d7ceec2005-09-27 15:22:58 -07001604 spin_lock_prefetch(&dev->queue_lock);
1605
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001606 /* Disable soft irqs for various locks below. Also
1607 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001609 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001611 /* Updates of qdisc are serialized by queue_lock.
1612 * The struct Qdisc which is pointed to by qdisc is now an
1613 * RCU structure - it may be accessed without acquiring
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 * a lock (but the structure may be stale.) The freeing of the
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001615 * qdisc will be deferred until it's known that there are no
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 * more references to it.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001617 *
1618 * If the qdisc has an enqueue function, we still need to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 * hold the queue_lock before calling it, since queue_lock
1620 * also serializes access to the device queue.
1621 */
1622
1623 q = rcu_dereference(dev->qdisc);
1624#ifdef CONFIG_NET_CLS_ACT
1625 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1626#endif
1627 if (q->enqueue) {
1628 /* Grab device queue */
1629 spin_lock(&dev->queue_lock);
Patrick McHardy85670cc2006-09-27 16:45:45 -07001630 q = dev->qdisc;
1631 if (q->enqueue) {
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001632 /* reset queue_mapping to zero */
1633 skb->queue_mapping = 0;
Patrick McHardy85670cc2006-09-27 16:45:45 -07001634 rc = q->enqueue(skb, q);
1635 qdisc_run(dev);
1636 spin_unlock(&dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637
Patrick McHardy85670cc2006-09-27 16:45:45 -07001638 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1639 goto out;
1640 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 spin_unlock(&dev->queue_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 }
1643
1644 /* The device has no queue. Common case for software devices:
1645 loopback, all sorts of tunnels...
1646
Herbert Xu932ff272006-06-09 12:20:56 -07001647 Really, it is unlikely that netif_tx_lock protection is necessary
1648 here. (For example, loopback and IP tunnels are clean, ignoring
1649 statistics counters.)
1650 However, it is possible that they rely on the protection
1651 we provide here.
1652
1653 Check this and take the lock anyway; it is not prone to deadlocks.
1654 Or just shoot the noqueue qdisc, which is even simpler 8)
1655 */
1656 if (dev->flags & IFF_UP) {
1657 int cpu = smp_processor_id(); /* ok because BHs are off */
1658
1659 if (dev->xmit_lock_owner != cpu) {
1660
1661 HARD_TX_LOCK(dev, cpu);
1662
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001663 if (!netif_queue_stopped(dev) &&
1664 !netif_subqueue_stopped(dev, skb->queue_mapping)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 rc = 0;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001666 if (!dev_hard_start_xmit(skb, dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 HARD_TX_UNLOCK(dev);
1668 goto out;
1669 }
1670 }
1671 HARD_TX_UNLOCK(dev);
1672 if (net_ratelimit())
1673 printk(KERN_CRIT "Virtual device %s asks to "
1674 "queue packet!\n", dev->name);
1675 } else {
1676 /* Recursion detected! It is possible,
1677 * unfortunately. */
1678 if (net_ratelimit())
1679 printk(KERN_CRIT "Dead loop on virtual device "
1680 "%s, fix it urgently!\n", dev->name);
1681 }
1682 }
1683
1684 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001685 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686
1687out_kfree_skb:
1688 kfree_skb(skb);
1689 return rc;
1690out:
Herbert Xud4828d82006-06-22 02:28:18 -07001691 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 return rc;
1693}
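
/*
 * Editor's illustrative sketch (not part of the kernel source): a minimal
 * transmit of a prebuilt Ethernet frame through dev_queue_xmit().  The
 * caller, frame contents and protocol value are hypothetical;
 * alloc_skb(), skb_reserve()/skb_put() and the "skb is consumed on both
 * success and failure" rule documented above are real.
 */
#if 0	/* example only, never compiled */
static int example_xmit_frame(struct net_device *dev,
			      const void *frame, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + len,
					GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), frame, len);	/* frame incl. MAC header */
	skb_reset_mac_header(skb);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);	/* hypothetical payload type */

	/* Do not touch the skb afterwards: it is consumed either way. */
	return dev_queue_xmit(skb);
}
#endif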
1694
1695
1696/*=======================================================================
1697 Receiver routines
1698 =======================================================================*/
1699
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001700int netdev_max_backlog __read_mostly = 1000;
1701int netdev_budget __read_mostly = 300;
1702int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
1704DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1705
1706
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707/**
1708 * netif_rx - post buffer to the network code
1709 * @skb: buffer to post
1710 *
1711 * This function receives a packet from a device driver and queues it for
1712 * the upper (protocol) levels to process. It always succeeds. The buffer
1713 * may be dropped during processing for congestion control or by the
1714 * protocol layers.
1715 *
1716 * return values:
1717 * NET_RX_SUCCESS (no congestion)
1718 * NET_RX_CN_LOW (low congestion)
1719 * NET_RX_CN_MOD (moderate congestion)
1720 * NET_RX_CN_HIGH (high congestion)
1721 * NET_RX_DROP (packet was dropped)
1722 *
1723 */
1724
1725int netif_rx(struct sk_buff *skb)
1726{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 struct softnet_data *queue;
1728 unsigned long flags;
1729
1730 /* if netpoll wants it, pretend we never saw it */
1731 if (netpoll_rx(skb))
1732 return NET_RX_DROP;
1733
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001734 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001735 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736
1737 /*
1738 * The code is rearranged so that the path is shortest when
1739 * the CPU is congested but still operating.
1740 */
1741 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 queue = &__get_cpu_var(softnet_data);
1743
1744 __get_cpu_var(netdev_rx_stat).total++;
1745 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1746 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747enqueue:
1748 dev_hold(skb->dev);
1749 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001751 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 }
1753
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001754 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 goto enqueue;
1756 }
1757
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 __get_cpu_var(netdev_rx_stat).dropped++;
1759 local_irq_restore(flags);
1760
1761 kfree_skb(skb);
1762 return NET_RX_DROP;
1763}
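
/*
 * Editor's illustrative sketch (not part of the kernel source): the
 * classic non-NAPI receive path a driver interrupt handler follows
 * before handing a frame to netif_rx().  example_hw_read() is a
 * hypothetical copy from device memory; dev_alloc_skb(), NET_IP_ALIGN
 * and eth_type_trans() are real.
 */
#if 0	/* example only, never compiled */
static void example_hw_read(struct net_device *dev, void *buf,
			    unsigned int len);	/* hypothetical */

static void example_isr_rx(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);

	if (!skb)
		return;				/* drop on allocation failure */
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	example_hw_read(dev, skb_put(skb, len), len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* queue to per-CPU backlog */
}
#endif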
1764
1765int netif_rx_ni(struct sk_buff *skb)
1766{
1767 int err;
1768
1769 preempt_disable();
1770 err = netif_rx(skb);
1771 if (local_softirq_pending())
1772 do_softirq();
1773 preempt_enable();
1774
1775 return err;
1776}
1777
1778EXPORT_SYMBOL(netif_rx_ni);
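
/*
 * Editor's illustrative sketch (not part of the kernel source): injecting
 * a locally generated frame from process context, where netif_rx_ni()
 * must be used so that any softirq it raises actually gets to run.
 */
#if 0	/* example only, never compiled */
static void example_inject(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_rx_ni(skb);		/* process context: may do_softirq() */
}
#endif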
1779
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001780static inline struct net_device *skb_bond(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781{
1782 struct net_device *dev = skb->dev;
1783
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001784 if (dev->master) {
David S. Miller7ea49ed2006-08-14 17:08:36 -07001785 if (skb_bond_should_drop(skb)) {
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001786 kfree_skb(skb);
1787 return NULL;
1788 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 skb->dev = dev->master;
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001790 }
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001791
1792 return dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793}
1794
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001795
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796static void net_tx_action(struct softirq_action *h)
1797{
1798 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1799
1800 if (sd->completion_queue) {
1801 struct sk_buff *clist;
1802
1803 local_irq_disable();
1804 clist = sd->completion_queue;
1805 sd->completion_queue = NULL;
1806 local_irq_enable();
1807
1808 while (clist) {
1809 struct sk_buff *skb = clist;
1810 clist = clist->next;
1811
1812 BUG_TRAP(!atomic_read(&skb->users));
1813 __kfree_skb(skb);
1814 }
1815 }
1816
1817 if (sd->output_queue) {
1818 struct net_device *head;
1819
1820 local_irq_disable();
1821 head = sd->output_queue;
1822 sd->output_queue = NULL;
1823 local_irq_enable();
1824
1825 while (head) {
1826 struct net_device *dev = head;
1827 head = head->next_sched;
1828
1829 smp_mb__before_clear_bit();
1830 clear_bit(__LINK_STATE_SCHED, &dev->state);
1831
1832 if (spin_trylock(&dev->queue_lock)) {
1833 qdisc_run(dev);
1834 spin_unlock(&dev->queue_lock);
1835 } else {
1836 netif_schedule(dev);
1837 }
1838 }
1839 }
1840}
1841
Stephen Hemminger6f05f622007-03-08 20:46:03 -08001842static inline int deliver_skb(struct sk_buff *skb,
1843 struct packet_type *pt_prev,
1844 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845{
1846 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001847 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848}
1849
1850#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
Stephen Hemminger6229e362007-03-21 13:38:47 -07001851/* These hooks are defined here for ATM. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852struct net_bridge;
1853struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
1854 unsigned char *addr);
Stephen Hemminger6229e362007-03-21 13:38:47 -07001855void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
Stephen Hemminger6229e362007-03-21 13:38:47 -07001857/*
1858 * If the bridge module is loaded, call the bridging hook.
1859 * Returns NULL if the packet was consumed.
1860 */
1861struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
1862 struct sk_buff *skb) __read_mostly;
1863static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
1864 struct packet_type **pt_prev, int *ret,
1865 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866{
1867 struct net_bridge_port *port;
1868
Stephen Hemminger6229e362007-03-21 13:38:47 -07001869 if (skb->pkt_type == PACKET_LOOPBACK ||
1870 (port = rcu_dereference(skb->dev->br_port)) == NULL)
1871 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872
1873 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07001874 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001876 }
1877
Stephen Hemminger6229e362007-03-21 13:38:47 -07001878 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879}
1880#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07001881#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882#endif
1883
Patrick McHardyb863ceb2007-07-14 18:55:06 -07001884#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
1885struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
1886EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
1887
1888static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
1889 struct packet_type **pt_prev,
1890 int *ret,
1891 struct net_device *orig_dev)
1892{
1893 if (skb->dev->macvlan_port == NULL)
1894 return skb;
1895
1896 if (*pt_prev) {
1897 *ret = deliver_skb(skb, *pt_prev, orig_dev);
1898 *pt_prev = NULL;
1899 }
1900 return macvlan_handle_frame_hook(skb);
1901}
1902#else
1903#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
1904#endif
1905
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906#ifdef CONFIG_NET_CLS_ACT
1907/* TODO: Maybe we should just force sch_ingress to be compiled in
1908 * when CONFIG_NET_CLS_ACT is? Otherwise we execute a few useless
1909 * instructions (a compare and two extra stores) when ingress is not
1910 * configured but CONFIG_NET_CLS_ACT is.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001911 * NOTE: This doesn't stop any functionality; if you don't have
1912 * the ingress scheduler, you just can't add policies on ingress.
1913 *
1914 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001915static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916{
1917 struct Qdisc *q;
1918 struct net_device *dev = skb->dev;
1919 int result = TC_ACT_OK;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001920
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 if (dev->qdisc_ingress) {
1922 __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
1923 if (MAX_RED_LOOP < ttl++) {
Patrick McHardyc01003c2007-03-29 11:46:52 -07001924 printk(KERN_WARNING "Redir loop detected, dropping packet (%d->%d)\n",
1925 skb->iif, skb->dev->ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 return TC_ACT_SHOT;
1927 }
1928
1929 skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);
1930
1931 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
David S. Miller86e65da2005-08-09 19:36:29 -07001932
Patrick McHardyfd44de72007-04-16 17:07:08 -07001933 spin_lock(&dev->ingress_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 if ((q = dev->qdisc_ingress) != NULL)
1935 result = q->enqueue(skb, q);
Patrick McHardyfd44de72007-04-16 17:07:08 -07001936 spin_unlock(&dev->ingress_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
1938 }
1939
1940 return result;
1941}
1942#endif
1943
1944int netif_receive_skb(struct sk_buff *skb)
1945{
1946 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001947 struct net_device *orig_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08001949 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950
1951 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001952 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 return NET_RX_DROP;
1954
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001955 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001956 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957
Patrick McHardyc01003c2007-03-29 11:46:52 -07001958 if (!skb->iif)
1959 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07001960
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001961 orig_dev = skb_bond(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001963 if (!orig_dev)
1964 return NET_RX_DROP;
1965
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 __get_cpu_var(netdev_rx_stat).total++;
1967
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001968 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001969 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001970 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
1972 pt_prev = NULL;
1973
1974 rcu_read_lock();
1975
1976#ifdef CONFIG_NET_CLS_ACT
1977 if (skb->tc_verd & TC_NCLS) {
1978 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
1979 goto ncls;
1980 }
1981#endif
1982
1983 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1984 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001985 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001986 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 pt_prev = ptype;
1988 }
1989 }
1990
1991#ifdef CONFIG_NET_CLS_ACT
1992 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001993 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 pt_prev = NULL; /* no one else should process this after us */
1995 } else {
1996 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
1997 }
1998
1999 ret = ing_filter(skb);
2000
2001 if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
2002 kfree_skb(skb);
2003 goto out;
2004 }
2005
2006 skb->tc_verd = 0;
2007ncls:
2008#endif
2009
Stephen Hemminger6229e362007-03-21 13:38:47 -07002010 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2011 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002013 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2014 if (!skb)
2015 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016
2017 type = skb->protocol;
2018 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
2019 if (ptype->type == type &&
2020 (!ptype->dev || ptype->dev == skb->dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002021 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002022 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 pt_prev = ptype;
2024 }
2025 }
2026
2027 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002028 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 } else {
2030 kfree_skb(skb);
2031 /* Jamal, now you will not be able to escape explaining
2032 * to me how you were going to use this. :-)
2033 */
2034 ret = NET_RX_DROP;
2035 }
2036
2037out:
2038 rcu_read_unlock();
2039 return ret;
2040}
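
/*
 * Editor's illustrative sketch (not part of the kernel source): a protocol
 * handler of the kind the ptype loops above deliver to.  The 0x88b5
 * ethertype (IEEE local experimental) and the handler body are chosen for
 * illustration; struct packet_type, dev_add_pack() and the handler
 * signature match the API used by deliver_skb() above.
 */
#if 0	/* example only, never compiled */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* deliver_skb() took a reference on our behalf; release it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_ptype = {
	.type = __constant_htons(0x88b5),
	.dev  = NULL,			/* NULL matches any device */
	.func = example_rcv,
};

/* dev_add_pack(&example_ptype) would hook it into ptype_base[] above. */
#endif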
2041
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002042static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043{
2044 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2046 unsigned long start_time = jiffies;
2047
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002048 napi->weight = weight_p;
2049 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 struct sk_buff *skb;
2051 struct net_device *dev;
2052
2053 local_irq_disable();
2054 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002055 if (!skb) {
2056 __napi_complete(napi);
2057 local_irq_enable();
2058 break;
2059 }
2060
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 local_irq_enable();
2062
2063 dev = skb->dev;
2064
2065 netif_receive_skb(skb);
2066
2067 dev_put(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002068 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002070 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071}
2072
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002073/**
2074 * __napi_schedule - schedule for receive
2075 * @n: entry to schedule
2076 *
2077 * The entry's receive function will be scheduled to run
2078 */
2079void fastcall __napi_schedule(struct napi_struct *n)
2080{
2081 unsigned long flags;
2082
2083 local_irq_save(flags);
2084 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2085 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2086 local_irq_restore(flags);
2087}
2088EXPORT_SYMBOL(__napi_schedule);
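
/*
 * Editor's illustrative sketch (not part of the kernel source): the driver
 * side of the NAPI contract that net_rx_action() below enforces.  The
 * example_* helpers and struct example_priv are hypothetical; the
 * napi_schedule()/napi_complete() pairing and the "stay scheduled while
 * work == weight" rule are real (see the WARN_ON_ONCE below).
 */
#if 0	/* example only, never compiled */
struct example_priv {
	struct napi_struct napi;
	/* ... hypothetical device state ... */
};

static void example_disable_rx_irq(struct example_priv *priv);	/* hypothetical */
static void example_enable_rx_irq(struct example_priv *priv);	/* hypothetical */
static int example_rx_ring_clean(struct example_priv *priv, int budget);

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	example_disable_rx_irq(priv);	/* mask until polling is done */
	napi_schedule(&priv->napi);	/* queue ->poll() via NET_RX_SOFTIRQ */
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv,
						 napi);
	int work = example_rx_ring_clean(priv, budget);

	if (work < budget) {		/* ring drained: stop polling */
		napi_complete(napi);
		example_enable_rx_irq(priv);
	}
	return work;			/* must never exceed budget */
}
#endif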
2089
2090
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091static void net_rx_action(struct softirq_action *h)
2092{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002093 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 unsigned long start_time = jiffies;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002095 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002096 void *have;
2097
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 local_irq_disable();
2099
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002100 while (!list_empty(list)) {
2101 struct napi_struct *n;
2102 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002104 /* If the softirq window is exhausted then punt.
2105 *
2106 * Note that this is a slight policy change from the
2107 * previous NAPI code, which would allow up to 2
2108 * jiffies to pass before breaking out. The test
2109 * used to be "jiffies - start_time > 1".
2110 */
2111 if (unlikely(budget <= 0 || jiffies != start_time))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 goto softnet_break;
2113
2114 local_irq_enable();
2115
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002116 /* Even though interrupts have been re-enabled, this
2117 * access is safe because interrupts can only add new
2118 * entries to the tail of this list, and only ->poll()
2119 * calls can remove this head entry from the list.
2120 */
2121 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002123 have = netpoll_poll_lock(n);
2124
2125 weight = n->weight;
2126
2127 work = n->poll(n, weight);
2128
2129 WARN_ON_ONCE(work > weight);
2130
2131 budget -= work;
2132
2133 local_irq_disable();
2134
2135 /* Drivers must not modify the NAPI state if they
2136 * consume the entire weight. In such cases this code
2137 * still "owns" the NAPI instance and therefore can
2138 * move the instance around on the list at-will.
2139 */
2140 if (unlikely(work == weight))
2141 list_move_tail(&n->poll_list, list);
2142
2143 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 }
2145out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002146 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002147
Chris Leechdb217332006-06-17 21:24:58 -07002148#ifdef CONFIG_NET_DMA
2149 /*
2150 * There may not be any more sk_buffs coming right now, so push
2151 * any pending DMA copies to hardware
2152 */
Dan Williamsd379b012007-07-09 11:56:42 -07002153 if (!cpus_empty(net_dma.channel_mask)) {
2154 int chan_idx;
2155 for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
2156 struct dma_chan *chan = net_dma.channels[chan_idx];
2157 if (chan)
2158 dma_async_memcpy_issue_pending(chan);
2159 }
Chris Leechdb217332006-06-17 21:24:58 -07002160 }
2161#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002162
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 return;
2164
2165softnet_break:
2166 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2167 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2168 goto out;
2169}
2170
2171static gifconf_func_t * gifconf_list [NPROTO];
2172
2173/**
2174 * register_gifconf - register a SIOCGIF handler
2175 * @family: Address family
2176 * @gifconf: Function handler
2177 *
2178 * Register protocol dependent address dumping routines. The handler
2179 * that is passed must not be freed or reused until it has been replaced
2180 * by another handler.
2181 */
2182int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2183{
2184 if (family >= NPROTO)
2185 return -EINVAL;
2186 gifconf_list[family] = gifconf;
2187 return 0;
2188}
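
/*
 * Editor's illustrative sketch (not part of the kernel source): the shape
 * of a gifconf handler as driven by dev_ifconf() below.  The body is
 * hypothetical; the calling convention (a NULL buffer means "report how
 * many bytes you would need") is taken from the loop in dev_ifconf().
 */
#if 0	/* example only, never compiled */
static int example_gifconf(struct net_device *dev, char __user *buf, int len)
{
	if (!buf)
		return sizeof(struct ifreq);	/* sizing pass */
	if (len < (int)sizeof(struct ifreq))
		return -EFAULT;
	/* ... copy one struct ifreq describing @dev to @buf ... */
	return sizeof(struct ifreq);		/* bytes written */
}

/* register_gifconf(AF_INET, example_gifconf) would install it. */
#endif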
2189
2190
2191/*
2192 * Map an interface index to its name (SIOCGIFNAME)
2193 */
2194
2195/*
2196 * We need this ioctl for efficient implementation of the
2197 * if_indextoname() function required by the IPv6 API. Without
2198 * it, we would have to search all the interfaces to find a
2199 * match. --pb
2200 */
2201
Eric W. Biederman881d9662007-09-17 11:56:21 -07002202static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203{
2204 struct net_device *dev;
2205 struct ifreq ifr;
2206
2207 /*
2208 * Fetch the caller's info block.
2209 */
2210
2211 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2212 return -EFAULT;
2213
2214 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002215 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 if (!dev) {
2217 read_unlock(&dev_base_lock);
2218 return -ENODEV;
2219 }
2220
2221 strcpy(ifr.ifr_name, dev->name);
2222 read_unlock(&dev_base_lock);
2223
2224 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2225 return -EFAULT;
2226 return 0;
2227}
2228
2229/*
2230 * Perform a SIOCGIFCONF call. This structure will change
2231 * size eventually, and there is nothing I can do about it.
2232 * Thus we will need a 'compatibility mode'.
2233 */
2234
Eric W. Biederman881d9662007-09-17 11:56:21 -07002235static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236{
2237 struct ifconf ifc;
2238 struct net_device *dev;
2239 char __user *pos;
2240 int len;
2241 int total;
2242 int i;
2243
2244 /*
2245 * Fetch the caller's info block.
2246 */
2247
2248 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2249 return -EFAULT;
2250
2251 pos = ifc.ifc_buf;
2252 len = ifc.ifc_len;
2253
2254 /*
2255 * Loop over the interfaces, and write an info block for each.
2256 */
2257
2258 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002259 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 for (i = 0; i < NPROTO; i++) {
2261 if (gifconf_list[i]) {
2262 int done;
2263 if (!pos)
2264 done = gifconf_list[i](dev, NULL, 0);
2265 else
2266 done = gifconf_list[i](dev, pos + total,
2267 len - total);
2268 if (done < 0)
2269 return -EFAULT;
2270 total += done;
2271 }
2272 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002273 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274
2275 /*
2276 * All done. Write the updated control block back to the caller.
2277 */
2278 ifc.ifc_len = total;
2279
2280 /*
2281 * Both BSD and Solaris return 0 here, so we do too.
2282 */
2283 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2284}
2285
2286#ifdef CONFIG_PROC_FS
2287/*
2288 * This is invoked by the /proc filesystem handler to display a device
2289 * in detail.
2290 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2292{
Eric W. Biederman881d9662007-09-17 11:56:21 -07002293 struct net *net = seq->private;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002294 loff_t off;
2295 struct net_device *dev;
2296
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002298 if (!*pos)
2299 return SEQ_START_TOKEN;
2300
2301 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002302 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002303 if (off++ == *pos)
2304 return dev;
2305
2306 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307}
2308
2309void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2310{
Eric W. Biederman881d9662007-09-17 11:56:21 -07002311 struct net *net = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002313 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002314 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315}
2316
2317void dev_seq_stop(struct seq_file *seq, void *v)
2318{
2319 read_unlock(&dev_base_lock);
2320}
2321
2322static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2323{
Rusty Russellc45d2862007-03-28 14:29:08 -07002324 struct net_device_stats *stats = dev->get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325
Rusty Russell5a1b5892007-04-28 21:04:03 -07002326 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2327 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2328 dev->name, stats->rx_bytes, stats->rx_packets,
2329 stats->rx_errors,
2330 stats->rx_dropped + stats->rx_missed_errors,
2331 stats->rx_fifo_errors,
2332 stats->rx_length_errors + stats->rx_over_errors +
2333 stats->rx_crc_errors + stats->rx_frame_errors,
2334 stats->rx_compressed, stats->multicast,
2335 stats->tx_bytes, stats->tx_packets,
2336 stats->tx_errors, stats->tx_dropped,
2337 stats->tx_fifo_errors, stats->collisions,
2338 stats->tx_carrier_errors +
2339 stats->tx_aborted_errors +
2340 stats->tx_window_errors +
2341 stats->tx_heartbeat_errors,
2342 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343}
2344
2345/*
2346 * Called from the PROCfs module. This now uses the new arbitrary sized
2347 * /proc/net interface to create /proc/net/dev
2348 */
2349static int dev_seq_show(struct seq_file *seq, void *v)
2350{
2351 if (v == SEQ_START_TOKEN)
2352 seq_puts(seq, "Inter-| Receive "
2353 " | Transmit\n"
2354 " face |bytes packets errs drop fifo frame "
2355 "compressed multicast|bytes packets errs "
2356 "drop fifo colls carrier compressed\n");
2357 else
2358 dev_seq_printf_stats(seq, v);
2359 return 0;
2360}
2361
2362static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2363{
2364 struct netif_rx_stats *rc = NULL;
2365
2366 while (*pos < NR_CPUS)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002367 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 rc = &per_cpu(netdev_rx_stat, *pos);
2369 break;
2370 } else
2371 ++*pos;
2372 return rc;
2373}
2374
2375static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2376{
2377 return softnet_get_online(pos);
2378}
2379
2380static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2381{
2382 ++*pos;
2383 return softnet_get_online(pos);
2384}
2385
2386static void softnet_seq_stop(struct seq_file *seq, void *v)
2387{
2388}
2389
2390static int softnet_seq_show(struct seq_file *seq, void *v)
2391{
2392 struct netif_rx_stats *s = v;
2393
2394 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07002395 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07002396 0, 0, 0, 0, /* was fastroute */
2397 s->cpu_collision );
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398 return 0;
2399}
2400
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002401static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402 .start = dev_seq_start,
2403 .next = dev_seq_next,
2404 .stop = dev_seq_stop,
2405 .show = dev_seq_show,
2406};
2407
2408static int dev_seq_open(struct inode *inode, struct file *file)
2409{
Eric W. Biederman881d9662007-09-17 11:56:21 -07002410 struct seq_file *seq;
2411 int res;
2412 res = seq_open(file, &dev_seq_ops);
2413 if (!res) {
2414 seq = file->private_data;
2415 seq->private = get_net(PROC_NET(inode));
2416 }
2417 return res;
2418}
2419
2420static int dev_seq_release(struct inode *inode, struct file *file)
2421{
2422 struct seq_file *seq = file->private_data;
2423 struct net *net = seq->private;
2424 put_net(net);
2425 return seq_release(inode, file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426}
2427
Arjan van de Ven9a321442007-02-12 00:55:35 -08002428static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 .owner = THIS_MODULE,
2430 .open = dev_seq_open,
2431 .read = seq_read,
2432 .llseek = seq_lseek,
Eric W. Biederman881d9662007-09-17 11:56:21 -07002433 .release = dev_seq_release,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434};
2435
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002436static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 .start = softnet_seq_start,
2438 .next = softnet_seq_next,
2439 .stop = softnet_seq_stop,
2440 .show = softnet_seq_show,
2441};
2442
2443static int softnet_seq_open(struct inode *inode, struct file *file)
2444{
2445 return seq_open(file, &softnet_seq_ops);
2446}
2447
Arjan van de Ven9a321442007-02-12 00:55:35 -08002448static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 .owner = THIS_MODULE,
2450 .open = softnet_seq_open,
2451 .read = seq_read,
2452 .llseek = seq_lseek,
2453 .release = seq_release,
2454};
2455
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002456static void *ptype_get_idx(loff_t pos)
2457{
2458 struct packet_type *pt = NULL;
2459 loff_t i = 0;
2460 int t;
2461
2462 list_for_each_entry_rcu(pt, &ptype_all, list) {
2463 if (i == pos)
2464 return pt;
2465 ++i;
2466 }
2467
2468 for (t = 0; t < 16; t++) {
2469 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2470 if (i == pos)
2471 return pt;
2472 ++i;
2473 }
2474 }
2475 return NULL;
2476}
2477
2478static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2479{
2480 rcu_read_lock();
2481 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2482}
2483
2484static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2485{
2486 struct packet_type *pt;
2487 struct list_head *nxt;
2488 int hash;
2489
2490 ++*pos;
2491 if (v == SEQ_START_TOKEN)
2492 return ptype_get_idx(0);
2493
2494 pt = v;
2495 nxt = pt->list.next;
2496 if (pt->type == htons(ETH_P_ALL)) {
2497 if (nxt != &ptype_all)
2498 goto found;
2499 hash = 0;
2500 nxt = ptype_base[0].next;
2501 } else
2502 hash = ntohs(pt->type) & 15;
2503
2504 while (nxt == &ptype_base[hash]) {
2505 if (++hash >= 16)
2506 return NULL;
2507 nxt = ptype_base[hash].next;
2508 }
2509found:
2510 return list_entry(nxt, struct packet_type, list);
2511}
2512
2513static void ptype_seq_stop(struct seq_file *seq, void *v)
2514{
2515 rcu_read_unlock();
2516}
2517
2518static void ptype_seq_decode(struct seq_file *seq, void *sym)
2519{
2520#ifdef CONFIG_KALLSYMS
2521 unsigned long offset = 0, symsize;
2522 const char *symname;
2523 char *modname;
2524 char namebuf[128];
2525
2526 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2527 &modname, namebuf);
2528
2529 if (symname) {
2530 char *delim = ":";
2531
2532 if (!modname)
2533 modname = delim = "";
2534 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2535 symname, offset);
2536 return;
2537 }
2538#endif
2539
2540 seq_printf(seq, "[%p]", sym);
2541}
2542
2543static int ptype_seq_show(struct seq_file *seq, void *v)
2544{
2545 struct packet_type *pt = v;
2546
2547 if (v == SEQ_START_TOKEN)
2548 seq_puts(seq, "Type Device Function\n");
2549 else {
2550 if (pt->type == htons(ETH_P_ALL))
2551 seq_puts(seq, "ALL ");
2552 else
2553 seq_printf(seq, "%04x", ntohs(pt->type));
2554
2555 seq_printf(seq, " %-8s ",
2556 pt->dev ? pt->dev->name : "");
2557 ptype_seq_decode(seq, pt->func);
2558 seq_putc(seq, '\n');
2559 }
2560
2561 return 0;
2562}
2563
2564static const struct seq_operations ptype_seq_ops = {
2565 .start = ptype_seq_start,
2566 .next = ptype_seq_next,
2567 .stop = ptype_seq_stop,
2568 .show = ptype_seq_show,
2569};
2570
2571static int ptype_seq_open(struct inode *inode, struct file *file)
2572{
2573 return seq_open(file, &ptype_seq_ops);
2574}
2575
2576static const struct file_operations ptype_seq_fops = {
2577 .owner = THIS_MODULE,
2578 .open = ptype_seq_open,
2579 .read = seq_read,
2580 .llseek = seq_lseek,
2581 .release = seq_release,
2582};
2583
2584
Eric W. Biederman881d9662007-09-17 11:56:21 -07002585static int dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586{
2587 int rc = -ENOMEM;
2588
Eric W. Biederman881d9662007-09-17 11:56:21 -07002589 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002591 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002593 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002594 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002595
Eric W. Biederman881d9662007-09-17 11:56:21 -07002596 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002597 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 rc = 0;
2599out:
2600 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002601out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002602 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002604 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002606 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 goto out;
2608}
Eric W. Biederman881d9662007-09-17 11:56:21 -07002609
2610static void dev_proc_net_exit(struct net *net)
2611{
2612 wext_proc_exit(net);
2613
2614 proc_net_remove(net, "ptype");
2615 proc_net_remove(net, "softnet_stat");
2616 proc_net_remove(net, "dev");
2617}
2618
2619static struct pernet_operations dev_proc_ops = {
2620 .init = dev_proc_net_init,
2621 .exit = dev_proc_net_exit,
2622};
2623
2624static int __init dev_proc_init(void)
2625{
2626 return register_pernet_subsys(&dev_proc_ops);
2627}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628#else
2629#define dev_proc_init() 0
2630#endif /* CONFIG_PROC_FS */
2631
2632
2633/**
2634 * netdev_set_master - set up master/slave pair
2635 * @slave: slave device
2636 * @master: new master device
2637 *
2638 * Changes the master device of the slave. Pass %NULL to break the
2639 * bonding. The caller must hold the RTNL semaphore. On a failure
2640 * a negative errno code is returned. On success the reference counts
2641 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2642 * function returns zero.
2643 */
2644int netdev_set_master(struct net_device *slave, struct net_device *master)
2645{
2646 struct net_device *old = slave->master;
2647
2648 ASSERT_RTNL();
2649
2650 if (master) {
2651 if (old)
2652 return -EBUSY;
2653 dev_hold(master);
2654 }
2655
2656 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002657
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658 synchronize_net();
2659
2660 if (old)
2661 dev_put(old);
2662
2663 if (master)
2664 slave->flags |= IFF_SLAVE;
2665 else
2666 slave->flags &= ~IFF_SLAVE;
2667
2668 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2669 return 0;
2670}
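
/*
 * Editor's illustrative sketch (not part of the kernel source): how a
 * bonding-style driver might pair and unpair devices with
 * netdev_set_master().  Error handling is trimmed; the RTNL requirement
 * is real (see ASSERT_RTNL() above).
 */
#if 0	/* example only, never compiled */
static int example_enslave(struct net_device *bond, struct net_device *slave)
{
	int err;

	rtnl_lock();
	err = netdev_set_master(slave, bond);	/* -EBUSY if already owned */
	rtnl_unlock();
	return err;
}

static void example_release(struct net_device *slave)
{
	rtnl_lock();
	netdev_set_master(slave, NULL);		/* break the pairing */
	rtnl_unlock();
}
#endif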
2671
Patrick McHardy4417da62007-06-27 01:28:10 -07002672static void __dev_set_promiscuity(struct net_device *dev, int inc)
2673{
2674 unsigned short old_flags = dev->flags;
2675
Patrick McHardy24023452007-07-14 18:51:31 -07002676 ASSERT_RTNL();
2677
Patrick McHardy4417da62007-06-27 01:28:10 -07002678 if ((dev->promiscuity += inc) == 0)
2679 dev->flags &= ~IFF_PROMISC;
2680 else
2681 dev->flags |= IFF_PROMISC;
2682 if (dev->flags != old_flags) {
2683 printk(KERN_INFO "device %s %s promiscuous mode\n",
2684 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2685 "left");
2686 audit_log(current->audit_context, GFP_ATOMIC,
2687 AUDIT_ANOM_PROMISCUOUS,
2688 "dev=%s prom=%d old_prom=%d auid=%u",
2689 dev->name, (dev->flags & IFF_PROMISC),
2690 (old_flags & IFF_PROMISC),
2691 audit_get_loginuid(current->audit_context));
Patrick McHardy24023452007-07-14 18:51:31 -07002692
2693 if (dev->change_rx_flags)
2694 dev->change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07002695 }
2696}
2697
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698/**
2699 * dev_set_promiscuity - update promiscuity count on a device
2700 * @dev: device
2701 * @inc: modifier
2702 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07002703 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 * remains above zero the interface remains promiscuous. Once it hits zero
2705 * the device reverts back to normal filtering operation. A negative inc
2706 * value is used to drop promiscuity on the device.
2707 */
2708void dev_set_promiscuity(struct net_device *dev, int inc)
2709{
2710 unsigned short old_flags = dev->flags;
2711
Patrick McHardy4417da62007-06-27 01:28:10 -07002712 __dev_set_promiscuity(dev, inc);
2713 if (dev->flags != old_flags)
2714 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715}
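
/*
 * Editor's illustrative sketch (not part of the kernel source): the
 * balanced +1/-1 use of dev_set_promiscuity() by a capture-style caller,
 * matching the counter semantics described in the kernel-doc above.
 * RTNL is taken because __dev_set_promiscuity() asserts it.
 */
#if 0	/* example only, never compiled */
static void example_capture_start(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, 1);	/* take one promiscuity reference */
	rtnl_unlock();
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);	/* drop it again */
	rtnl_unlock();
}
#endif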
2716
2717/**
2718 * dev_set_allmulti - update allmulti count on a device
2719 * @dev: device
2720 * @inc: modifier
2721 *
2722 * Add or remove reception of all multicast frames on a device. While the
2723 * count in the device remains above zero the interface remains listening
2724 * to all multicast frames. Once it hits zero the device reverts back to normal
2725 * filtering operation. A negative @inc value is used to drop the counter
2726 * when releasing a resource needing all multicasts.
2727 */
2728
2729void dev_set_allmulti(struct net_device *dev, int inc)
2730{
2731 unsigned short old_flags = dev->flags;
2732
Patrick McHardy24023452007-07-14 18:51:31 -07002733 ASSERT_RTNL();
2734
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 dev->flags |= IFF_ALLMULTI;
2736 if ((dev->allmulti += inc) == 0)
2737 dev->flags &= ~IFF_ALLMULTI;
Patrick McHardy24023452007-07-14 18:51:31 -07002738 if (dev->flags ^ old_flags) {
2739 if (dev->change_rx_flags)
2740 dev->change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07002741 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07002742 }
Patrick McHardy4417da62007-06-27 01:28:10 -07002743}
2744
2745/*
2746 * Upload unicast and multicast address lists to device and
2747 * configure RX filtering. When the device doesn't support unicast
2748 * filtering, it is put in promiscuous mode while unicast addresses
2749 * are present.
2750 */
2751void __dev_set_rx_mode(struct net_device *dev)
2752{
2753 /* dev_open will call this function so the list will stay sane. */
2754 if (!(dev->flags&IFF_UP))
2755 return;
2756
2757 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09002758 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07002759
2760 if (dev->set_rx_mode)
2761 dev->set_rx_mode(dev);
2762 else {
2763 /* Unicast address changes may only happen under the rtnl,
2764 * therefore calling __dev_set_promiscuity here is safe.
2765 */
2766 if (dev->uc_count > 0 && !dev->uc_promisc) {
2767 __dev_set_promiscuity(dev, 1);
2768 dev->uc_promisc = 1;
2769 } else if (dev->uc_count == 0 && dev->uc_promisc) {
2770 __dev_set_promiscuity(dev, -1);
2771 dev->uc_promisc = 0;
2772 }
2773
2774 if (dev->set_multicast_list)
2775 dev->set_multicast_list(dev);
2776 }
2777}
2778
2779void dev_set_rx_mode(struct net_device *dev)
2780{
2781 netif_tx_lock_bh(dev);
2782 __dev_set_rx_mode(dev);
2783 netif_tx_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784}
2785
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002786int __dev_addr_delete(struct dev_addr_list **list, int *count,
2787 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07002788{
2789 struct dev_addr_list *da;
2790
2791 for (; (da = *list) != NULL; list = &da->next) {
2792 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2793 alen == da->da_addrlen) {
2794 if (glbl) {
2795 int old_glbl = da->da_gusers;
2796 da->da_gusers = 0;
2797 if (old_glbl == 0)
2798 break;
2799 }
2800 if (--da->da_users)
2801 return 0;
2802
2803 *list = da->next;
2804 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002805 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07002806 return 0;
2807 }
2808 }
2809 return -ENOENT;
2810}
2811
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002812int __dev_addr_add(struct dev_addr_list **list, int *count,
2813 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07002814{
2815 struct dev_addr_list *da;
2816
2817 for (da = *list; da != NULL; da = da->next) {
2818 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2819 da->da_addrlen == alen) {
2820 if (glbl) {
2821 int old_glbl = da->da_gusers;
2822 da->da_gusers = 1;
2823 if (old_glbl)
2824 return 0;
2825 }
2826 da->da_users++;
2827 return 0;
2828 }
2829 }
2830
2831 da = kmalloc(sizeof(*da), GFP_ATOMIC);
2832 if (da == NULL)
2833 return -ENOMEM;
2834 memcpy(da->da_addr, addr, alen);
2835 da->da_addrlen = alen;
2836 da->da_users = 1;
2837 da->da_gusers = glbl ? 1 : 0;
2838 da->next = *list;
2839 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002840 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07002841 return 0;
2842}
2843
Patrick McHardy4417da62007-06-27 01:28:10 -07002844/**
2845 * dev_unicast_delete - Release secondary unicast address.
2846 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07002847 * @addr: address to delete
2848 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07002849 *
2850 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07002851 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07002852 *
2853 * The caller must hold the rtnl_mutex.
2854 */
2855int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
2856{
2857 int err;
2858
2859 ASSERT_RTNL();
2860
2861 netif_tx_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002862 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2863 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07002864 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07002865 netif_tx_unlock_bh(dev);
2866 return err;
2867}
2868EXPORT_SYMBOL(dev_unicast_delete);
2869
2870/**
2871 * dev_unicast_add - add a secondary unicast address
2872 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07002873 * @addr: address to add
2874 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07002875 *
2876 * Add a secondary unicast address to the device or increase
2877 * the reference count if it already exists.
2878 *
2879 * The caller must hold the rtnl_mutex.
2880 */
2881int dev_unicast_add(struct net_device *dev, void *addr, int alen)
2882{
2883 int err;
2884
2885 ASSERT_RTNL();
2886
2887 netif_tx_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002888 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2889 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07002890 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07002891 netif_tx_unlock_bh(dev);
2892 return err;
2893}
2894EXPORT_SYMBOL(dev_unicast_add);
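
/*
 * Editor's illustrative sketch (not part of the kernel source): a
 * macvlan-style upper device asking its lower device to accept one extra
 * unicast MAC address.  The address handling is hypothetical; the
 * rtnl_mutex requirement comes from the kernel-doc above.
 */
#if 0	/* example only, never compiled */
static int example_listen_extra_mac(struct net_device *lower, u8 *mac)
{
	int err;

	rtnl_lock();
	err = dev_unicast_add(lower, mac, ETH_ALEN);
	rtnl_unlock();
	return err;	/* pair with dev_unicast_delete() on teardown */
}
#endif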
2895
Denis Cheng12972622007-07-18 02:12:56 -07002896static void __dev_addr_discard(struct dev_addr_list **list)
2897{
2898 struct dev_addr_list *tmp;
2899
2900 while (*list != NULL) {
2901 tmp = *list;
2902 *list = tmp->next;
2903 if (tmp->da_users > tmp->da_gusers)
2904 printk(KERN_ERR "__dev_addr_discard: address leakage! "
2905 "da_users=%d\n", tmp->da_users);
2906 kfree(tmp);
2907 }
2908}
2909
Denis Cheng26cc2522007-07-18 02:12:03 -07002910static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07002911{
2912 netif_tx_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07002913
Patrick McHardy4417da62007-06-27 01:28:10 -07002914 __dev_addr_discard(&dev->uc_list);
2915 dev->uc_count = 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07002916
Denis Cheng456ad752007-07-18 02:10:54 -07002917 __dev_addr_discard(&dev->mc_list);
2918 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07002919
Denis Cheng456ad752007-07-18 02:10:54 -07002920 netif_tx_unlock_bh(dev);
2921}
2922
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923unsigned dev_get_flags(const struct net_device *dev)
2924{
2925 unsigned flags;
2926
2927 flags = (dev->flags & ~(IFF_PROMISC |
2928 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08002929 IFF_RUNNING |
2930 IFF_LOWER_UP |
2931 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 (dev->gflags & (IFF_PROMISC |
2933 IFF_ALLMULTI));
2934
Stefan Rompfb00055a2006-03-20 17:09:11 -08002935 if (netif_running(dev)) {
2936 if (netif_oper_up(dev))
2937 flags |= IFF_RUNNING;
2938 if (netif_carrier_ok(dev))
2939 flags |= IFF_LOWER_UP;
2940 if (netif_dormant(dev))
2941 flags |= IFF_DORMANT;
2942 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943
2944 return flags;
2945}
2946
2947int dev_change_flags(struct net_device *dev, unsigned flags)
2948{
Thomas Graf7c355f52007-06-05 16:03:03 -07002949 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 int old_flags = dev->flags;
2951
Patrick McHardy24023452007-07-14 18:51:31 -07002952 ASSERT_RTNL();
2953
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954 /*
2955 * Set the flags on our device.
2956 */
2957
2958 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
2959 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
2960 IFF_AUTOMEDIA)) |
2961 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
2962 IFF_ALLMULTI));
2963
2964 /*
2965 * Load in the correct multicast list now the flags have changed.
2966 */
2967
Patrick McHardy24023452007-07-14 18:51:31 -07002968 if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST)
2969 dev->change_rx_flags(dev, IFF_MULTICAST);
2970
Patrick McHardy4417da62007-06-27 01:28:10 -07002971 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972
2973 /*
2974 * Have we downed the interface? We handle IFF_UP ourselves
2975 * according to user attempts to set it, rather than blindly
2976 * setting it.
2977 */
2978
2979 ret = 0;
2980 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
2981 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
2982
2983 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07002984 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 }
2986
2987 if (dev->flags & IFF_UP &&
2988 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
2989 IFF_VOLATILE)))
Alan Sternf07d5b92006-05-09 15:23:03 -07002990 raw_notifier_call_chain(&netdev_chain,
Alan Sterne041c682006-03-27 01:16:30 -08002991 NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992
2993 if ((flags ^ dev->gflags) & IFF_PROMISC) {
2994 int inc = (flags & IFF_PROMISC) ? +1 : -1;
2995 dev->gflags ^= IFF_PROMISC;
2996 dev_set_promiscuity(dev, inc);
2997 }
2998
2999 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3000 is important. Some (broken) drivers set IFF_PROMISC when
3001 IFF_ALLMULTI is requested, without asking us and without reporting it.
3002 */
3003 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3004 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3005 dev->gflags ^= IFF_ALLMULTI;
3006 dev_set_allmulti(dev, inc);
3007 }
3008
Thomas Graf7c355f52007-06-05 16:03:03 -07003009 /* Exclude state transition flags, already notified */
3010 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3011 if (changes)
3012 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013
3014 return ret;
3015}
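
/*
 * Editor's illustrative sketch (not part of the kernel source): bringing
 * an interface up the way the SIOCSIFFLAGS path does, by round-tripping
 * the flag word through dev_get_flags()/dev_change_flags() under RTNL.
 */
#if 0	/* example only, never compiled */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;		/* 0, or the error from dev_open() */
}
#endif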
3016
3017int dev_set_mtu(struct net_device *dev, int new_mtu)
3018{
3019 int err;
3020
3021 if (new_mtu == dev->mtu)
3022 return 0;
3023
3024 /* MTU must be positive. */
3025 if (new_mtu < 0)
3026 return -EINVAL;
3027
3028 if (!netif_device_present(dev))
3029 return -ENODEV;
3030
3031 err = 0;
3032 if (dev->change_mtu)
3033 err = dev->change_mtu(dev, new_mtu);
3034 else
3035 dev->mtu = new_mtu;
3036 if (!err && dev->flags & IFF_UP)
Alan Sternf07d5b92006-05-09 15:23:03 -07003037 raw_notifier_call_chain(&netdev_chain,
Alan Sterne041c682006-03-27 01:16:30 -08003038 NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 return err;
3040}
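
/*
 * Editor's illustrative sketch (not part of the kernel source): switching
 * a device to a jumbo MTU.  The 9000-byte value is arbitrary and the
 * caller is assumed to hold RTNL, as the ioctl path does; the negative-MTU
 * check and NETDEV_CHANGEMTU notification are implemented just above.
 */
#if 0	/* example only, never compiled */
static int example_enable_jumbo(struct net_device *dev)
{
	int err = dev_set_mtu(dev, 9000);

	if (err)
		printk(KERN_WARNING "%s: jumbo MTU rejected (err %d)\n",
		       dev->name, err);
	return err;
}
#endif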
3041
3042int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3043{
3044 int err;
3045
3046 if (!dev->set_mac_address)
3047 return -EOPNOTSUPP;
3048 if (sa->sa_family != dev->type)
3049 return -EINVAL;
3050 if (!netif_device_present(dev))
3051 return -ENODEV;
3052 err = dev->set_mac_address(dev, sa);
3053 if (!err)
Alan Sternf07d5b92006-05-09 15:23:03 -07003054 raw_notifier_call_chain(&netdev_chain,
Alan Sterne041c682006-03-27 01:16:30 -08003055 NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056 return err;
3057}
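/*
 * Example (illustrative sketch, not part of the original file):
 * programming a new hardware address, as the SIOCSIFHWADDR handler
 * below does. "new_mac" is a hypothetical buffer of dev->addr_len
 * bytes; callers hold RTNL:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */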
3058
3059/*
3060 * Perform the SIOCxIFxxx calls.
3061 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003062static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063{
3064 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003065 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066
3067 if (!dev)
3068 return -ENODEV;
3069
3070 switch (cmd) {
3071 case SIOCGIFFLAGS: /* Get interface flags */
3072 ifr->ifr_flags = dev_get_flags(dev);
3073 return 0;
3074
3075 case SIOCSIFFLAGS: /* Set interface flags */
3076 return dev_change_flags(dev, ifr->ifr_flags);
3077
3078 case SIOCGIFMETRIC: /* Get the metric on the interface
3079 (currently unused) */
3080 ifr->ifr_metric = 0;
3081 return 0;
3082
3083 case SIOCSIFMETRIC: /* Set the metric on the interface
3084 (currently unused) */
3085 return -EOPNOTSUPP;
3086
3087 case SIOCGIFMTU: /* Get the MTU of a device */
3088 ifr->ifr_mtu = dev->mtu;
3089 return 0;
3090
3091 case SIOCSIFMTU: /* Set the MTU of a device */
3092 return dev_set_mtu(dev, ifr->ifr_mtu);
3093
3094 case SIOCGIFHWADDR:
3095 if (!dev->addr_len)
3096 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3097 else
3098 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3099 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3100 ifr->ifr_hwaddr.sa_family = dev->type;
3101 return 0;
3102
3103 case SIOCSIFHWADDR:
3104 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3105
3106 case SIOCSIFHWBROADCAST:
3107 if (ifr->ifr_hwaddr.sa_family != dev->type)
3108 return -EINVAL;
3109 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3110 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Alan Sternf07d5b92006-05-09 15:23:03 -07003111 raw_notifier_call_chain(&netdev_chain,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003112 NETDEV_CHANGEADDR, dev);
3113 return 0;
3114
3115 case SIOCGIFMAP:
3116 ifr->ifr_map.mem_start = dev->mem_start;
3117 ifr->ifr_map.mem_end = dev->mem_end;
3118 ifr->ifr_map.base_addr = dev->base_addr;
3119 ifr->ifr_map.irq = dev->irq;
3120 ifr->ifr_map.dma = dev->dma;
3121 ifr->ifr_map.port = dev->if_port;
3122 return 0;
3123
3124 case SIOCSIFMAP:
3125 if (dev->set_config) {
3126 if (!netif_device_present(dev))
3127 return -ENODEV;
3128 return dev->set_config(dev, &ifr->ifr_map);
3129 }
3130 return -EOPNOTSUPP;
3131
3132 case SIOCADDMULTI:
3133 if (!dev->set_multicast_list ||
3134 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3135 return -EINVAL;
3136 if (!netif_device_present(dev))
3137 return -ENODEV;
3138 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3139 dev->addr_len, 1);
3140
3141 case SIOCDELMULTI:
3142 if (!dev->set_multicast_list ||
3143 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3144 return -EINVAL;
3145 if (!netif_device_present(dev))
3146 return -ENODEV;
3147 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3148 dev->addr_len, 1);
3149
3150 case SIOCGIFINDEX:
3151 ifr->ifr_ifindex = dev->ifindex;
3152 return 0;
3153
3154 case SIOCGIFTXQLEN:
3155 ifr->ifr_qlen = dev->tx_queue_len;
3156 return 0;
3157
3158 case SIOCSIFTXQLEN:
3159 if (ifr->ifr_qlen < 0)
3160 return -EINVAL;
3161 dev->tx_queue_len = ifr->ifr_qlen;
3162 return 0;
3163
3164 case SIOCSIFNAME:
3165 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3166 return dev_change_name(dev, ifr->ifr_newname);
3167
3168 /*
3169 * Unknown or private ioctl
3170 */
3171
3172 default:
3173 if ((cmd >= SIOCDEVPRIVATE &&
3174 cmd <= SIOCDEVPRIVATE + 15) ||
3175 cmd == SIOCBONDENSLAVE ||
3176 cmd == SIOCBONDRELEASE ||
3177 cmd == SIOCBONDSETHWADDR ||
3178 cmd == SIOCBONDSLAVEINFOQUERY ||
3179 cmd == SIOCBONDINFOQUERY ||
3180 cmd == SIOCBONDCHANGEACTIVE ||
3181 cmd == SIOCGMIIPHY ||
3182 cmd == SIOCGMIIREG ||
3183 cmd == SIOCSMIIREG ||
3184 cmd == SIOCBRADDIF ||
3185 cmd == SIOCBRDELIF ||
3186 cmd == SIOCWANDEV) {
3187 err = -EOPNOTSUPP;
3188 if (dev->do_ioctl) {
3189 if (netif_device_present(dev))
3190 err = dev->do_ioctl(dev, ifr,
3191 cmd);
3192 else
3193 err = -ENODEV;
3194 }
3195 } else
3196 err = -EINVAL;
3197
3198 }
3199 return err;
3200}
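/*
 * Example (hypothetical driver-side sketch, not from the original file):
 * private ioctls in the SIOCDEVPRIVATE range fall through to
 * dev->do_ioctl above. A driver could hook them like this
 * ("mydrv_ioctl" is an invented name):
 *
 *	static int mydrv_ioctl(struct net_device *dev, struct ifreq *ifr,
 *			       int cmd)
 *	{
 *		if (cmd == SIOCDEVPRIVATE)
 *			return 0;	// handle the driver-specific request
 *		return -EOPNOTSUPP;
 *	}
 *
 *	// in the driver's init path: dev->do_ioctl = mydrv_ioctl;
 */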
3201
3202/*
3203 * This function handles all "interface"-type I/O control requests. The actual
3204 * 'doing' part of this is dev_ifsioc above.
3205 */
3206
3207/**
3208 * dev_ioctl - network device ioctl
 * @net: the applicable net namespace
3209 * @cmd: command to issue
3210 * @arg: pointer to a struct ifreq in user space
3211 *
3212 * Issue ioctl functions to devices. This is normally called by the
3213 * user space syscall interfaces but can sometimes be useful for
3214 * other purposes. The return value is the return from the syscall if
3215 * positive, or a negative errno code on error.
3216 */
3217
Eric W. Biederman881d9662007-09-17 11:56:21 -07003218int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219{
3220 struct ifreq ifr;
3221 int ret;
3222 char *colon;
3223
3224	/* One special case: SIOCGIFCONF takes an ifconf argument
3225	   and requires a shared lock, because it sleeps while writing
3226	   to user space.
3227 */
3228
3229 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003230 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003231 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003232 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233 return ret;
3234 }
3235 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003236 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003237
3238 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3239 return -EFAULT;
3240
3241 ifr.ifr_name[IFNAMSIZ-1] = 0;
3242
3243 colon = strchr(ifr.ifr_name, ':');
3244 if (colon)
3245 *colon = 0;
3246
3247 /*
3248 * See which interface the caller is talking about.
3249 */
3250
3251 switch (cmd) {
3252 /*
3253 * These ioctl calls:
3254 * - can be done by all.
3255 * - atomic and do not require locking.
3256 * - return a value
3257 */
3258 case SIOCGIFFLAGS:
3259 case SIOCGIFMETRIC:
3260 case SIOCGIFMTU:
3261 case SIOCGIFHWADDR:
3262 case SIOCGIFSLAVE:
3263 case SIOCGIFMAP:
3264 case SIOCGIFINDEX:
3265 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003266 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07003268 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269 read_unlock(&dev_base_lock);
3270 if (!ret) {
3271 if (colon)
3272 *colon = ':';
3273 if (copy_to_user(arg, &ifr,
3274 sizeof(struct ifreq)))
3275 ret = -EFAULT;
3276 }
3277 return ret;
3278
3279 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003280 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003281 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003282 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283 rtnl_unlock();
3284 if (!ret) {
3285 if (colon)
3286 *colon = ':';
3287 if (copy_to_user(arg, &ifr,
3288 sizeof(struct ifreq)))
3289 ret = -EFAULT;
3290 }
3291 return ret;
3292
3293 /*
3294 * These ioctl calls:
3295 * - require superuser power.
3296 * - require strict serialization.
3297 * - return a value
3298 */
3299 case SIOCGMIIPHY:
3300 case SIOCGMIIREG:
3301 case SIOCSIFNAME:
3302 if (!capable(CAP_NET_ADMIN))
3303 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003304 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003305 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003306 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307 rtnl_unlock();
3308 if (!ret) {
3309 if (colon)
3310 *colon = ':';
3311 if (copy_to_user(arg, &ifr,
3312 sizeof(struct ifreq)))
3313 ret = -EFAULT;
3314 }
3315 return ret;
3316
3317 /*
3318 * These ioctl calls:
3319 * - require superuser power.
3320 * - require strict serialization.
3321 * - do not return a value
3322 */
3323 case SIOCSIFFLAGS:
3324 case SIOCSIFMETRIC:
3325 case SIOCSIFMTU:
3326 case SIOCSIFMAP:
3327 case SIOCSIFHWADDR:
3328 case SIOCSIFSLAVE:
3329 case SIOCADDMULTI:
3330 case SIOCDELMULTI:
3331 case SIOCSIFHWBROADCAST:
3332 case SIOCSIFTXQLEN:
3333 case SIOCSMIIREG:
3334 case SIOCBONDENSLAVE:
3335 case SIOCBONDRELEASE:
3336 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 case SIOCBONDCHANGEACTIVE:
3338 case SIOCBRADDIF:
3339 case SIOCBRDELIF:
3340 if (!capable(CAP_NET_ADMIN))
3341 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08003342 /* fall through */
3343 case SIOCBONDSLAVEINFOQUERY:
3344 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003345 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003347 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348 rtnl_unlock();
3349 return ret;
3350
3351 case SIOCGIFMEM:
3352 /* Get the per device memory space. We can add this but
3353 * currently do not support it */
3354 case SIOCSIFMEM:
3355 /* Set the per device memory buffer space.
3356 * Not applicable in our case */
3357 case SIOCSIFLINK:
3358 return -EINVAL;
3359
3360 /*
3361 * Unknown or private ioctl.
3362 */
3363 default:
3364 if (cmd == SIOCWANDEV ||
3365 (cmd >= SIOCDEVPRIVATE &&
3366 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003367 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003369 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 rtnl_unlock();
3371 if (!ret && copy_to_user(arg, &ifr,
3372 sizeof(struct ifreq)))
3373 ret = -EFAULT;
3374 return ret;
3375 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07003377 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003378 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379 return -EINVAL;
3380 }
3381}
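/*
 * Example (illustrative user-space sketch, not part of this file): these
 * requests reach dev_ioctl() through ioctl(2) on any socket, e.g. to
 * read the flags of "eth0":
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
 *		printf("flags: 0x%x\n", ifr.ifr_flags);
 */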
3382
3383
3384/**
3385 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
3386 *
3387 * Returns a suitable unique value for a new device interface
3388 * number. The caller must hold the rtnl semaphore or the
3389 * dev_base_lock to be sure it remains unique.
3390 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003391static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392{
3393 static int ifindex;
3394 for (;;) {
3395 if (++ifindex <= 0)
3396 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003397 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398 return ifindex;
3399 }
3400}
3401
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402/* Delayed registration/unregistration */
3403static DEFINE_SPINLOCK(net_todo_list_lock);
3404static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
3405
Stephen Hemminger6f05f622007-03-08 20:46:03 -08003406static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407{
3408 spin_lock(&net_todo_list_lock);
3409 list_add_tail(&dev->todo_list, &net_todo_list);
3410 spin_unlock(&net_todo_list_lock);
3411}
3412
3413/**
3414 * register_netdevice - register a network device
3415 * @dev: device to register
3416 *
3417 * Take a completed network device structure and add it to the kernel
3418 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3419 * chain. 0 is returned on success. A negative errno code is returned
3420 * on a failure to set up the device, or if the name is a duplicate.
3421 *
3422 * Callers must hold the rtnl semaphore. You may want
3423 * register_netdev() instead of this.
3424 *
3425 * BUGS:
3426 * The locking appears insufficient to guarantee two parallel registers
3427 * will not get the same name.
3428 */
3429
3430int register_netdevice(struct net_device *dev)
3431{
3432 struct hlist_head *head;
3433 struct hlist_node *p;
3434 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003435 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436
3437 BUG_ON(dev_boot_phase);
3438 ASSERT_RTNL();
3439
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003440 might_sleep();
3441
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442 /* When net_device's are persistent, this will be fatal. */
3443 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Eric W. Biederman881d9662007-09-17 11:56:21 -07003444 BUG_ON(!dev->nd_net);
3445 net = dev->nd_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446
3447 spin_lock_init(&dev->queue_lock);
Herbert Xu932ff272006-06-09 12:20:56 -07003448 spin_lock_init(&dev->_xmit_lock);
Jarek Poplawski723e98b2007-05-15 22:46:18 -07003449 netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003450 dev->xmit_lock_owner = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003451 spin_lock_init(&dev->ingress_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 dev->iflink = -1;
3454
3455 /* Init, if this function is available */
3456 if (dev->init) {
3457 ret = dev->init(dev);
3458 if (ret) {
3459 if (ret > 0)
3460 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08003461 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462 }
3463 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003464
Linus Torvalds1da177e2005-04-16 15:20:36 -07003465 if (!dev_valid_name(dev->name)) {
3466 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003467 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003468 }
3469
Eric W. Biederman881d9662007-09-17 11:56:21 -07003470 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 if (dev->iflink == -1)
3472 dev->iflink = dev->ifindex;
3473
3474 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003475 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 hlist_for_each(p, head) {
3477 struct net_device *d
3478 = hlist_entry(p, struct net_device, name_hlist);
3479 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3480 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003481 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003483 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484
Stephen Hemmingerd212f872007-06-27 00:47:37 -07003485 /* Fix illegal checksum combinations */
3486 if ((dev->features & NETIF_F_HW_CSUM) &&
3487 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3488 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3489 dev->name);
3490 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3491 }
3492
3493 if ((dev->features & NETIF_F_NO_CSUM) &&
3494 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3495 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3496 dev->name);
3497 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3498 }
3499
3500
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501 /* Fix illegal SG+CSUM combinations. */
3502 if ((dev->features & NETIF_F_SG) &&
Herbert Xu8648b302006-06-17 22:06:05 -07003503 !(dev->features & NETIF_F_ALL_CSUM)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003504 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505 dev->name);
3506 dev->features &= ~NETIF_F_SG;
3507 }
3508
3509 /* TSO requires that SG is present as well. */
3510 if ((dev->features & NETIF_F_TSO) &&
3511 !(dev->features & NETIF_F_SG)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003512 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003513 dev->name);
3514 dev->features &= ~NETIF_F_TSO;
3515 }
Ananda Rajue89e9cf2005-10-18 15:46:41 -07003516 if (dev->features & NETIF_F_UFO) {
3517 if (!(dev->features & NETIF_F_HW_CSUM)) {
3518 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3519 "NETIF_F_HW_CSUM feature.\n",
3520 dev->name);
3521 dev->features &= ~NETIF_F_UFO;
3522 }
3523 if (!(dev->features & NETIF_F_SG)) {
3524 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3525 "NETIF_F_SG feature.\n",
3526 dev->name);
3527 dev->features &= ~NETIF_F_UFO;
3528 }
3529 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530
3531 /*
3532	 * Nil rebuild_header routine: it should never be called
3533	 * and exists purely as a bug trap.
3534 */
3535
3536 if (!dev->rebuild_header)
3537 dev->rebuild_header = default_rebuild_header;
3538
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003539 ret = netdev_register_sysfs(dev);
3540 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003541 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003542 dev->reg_state = NETREG_REGISTERED;
3543
Linus Torvalds1da177e2005-04-16 15:20:36 -07003544 /*
3545	 *	Default initial state at registration is that the
3546	 *	device is present.
3547 */
3548
3549 set_bit(__LINK_STATE_PRESENT, &dev->state);
3550
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551 dev_init_scheduler(dev);
3552 write_lock_bh(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07003553 list_add_tail(&dev->dev_list, &net->dev_base_head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554 hlist_add_head(&dev->name_hlist, head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07003555 hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003556 dev_hold(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 write_unlock_bh(&dev_base_lock);
3558
3559	/* Notify protocols that a new device appeared. */
Herbert Xufcc5a032007-07-30 17:03:38 -07003560 ret = raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
3561 ret = notifier_to_errno(ret);
3562 if (ret)
3563 unregister_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564
3565out:
3566 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003567
3568err_uninit:
3569 if (dev->uninit)
3570 dev->uninit(dev);
3571 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572}
3573
3574/**
3575 * register_netdev - register a network device
3576 * @dev: device to register
3577 *
3578 * Take a completed network device structure and add it to the kernel
3579 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3580 * chain. 0 is returned on success. A negative errno code is returned
3581 * on a failure to set up the device, or if the name is a duplicate.
3582 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07003583 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07003584 * and expands the device name if you passed a format string to
3585 * alloc_netdev.
3586 */
3587int register_netdev(struct net_device *dev)
3588{
3589 int err;
3590
3591 rtnl_lock();
3592
3593 /*
3594 * If the name is a format string the caller wants us to do a
3595 * name allocation.
3596 */
3597 if (strchr(dev->name, '%')) {
3598 err = dev_alloc_name(dev, dev->name);
3599 if (err < 0)
3600 goto out;
3601 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003602
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603 err = register_netdevice(dev);
3604out:
3605 rtnl_unlock();
3606 return err;
3607}
3608EXPORT_SYMBOL(register_netdev);
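/*
 * Example (illustrative sketch, not from the original file): a typical
 * driver probe path using register_netdev(). "struct my_priv" and
 * "my_setup" are hypothetical names:
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "mydev%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *
 *	err = register_netdev(dev);	// expands "%d", takes RTNL
 *	if (err)
 *		free_netdev(dev);
 */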
3609
3610/*
3611 * netdev_wait_allrefs - wait until all references are gone.
3612 *
3613 * This is called when unregistering network devices.
3614 *
3615 * Any protocol or device that holds a reference should register
3616 * for netdevice notification, and cleanup and put back the
3617 * reference if they receive an UNREGISTER event.
3618 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003619 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003620 */
3621static void netdev_wait_allrefs(struct net_device *dev)
3622{
3623 unsigned long rebroadcast_time, warning_time;
3624
3625 rebroadcast_time = warning_time = jiffies;
3626 while (atomic_read(&dev->refcnt) != 0) {
3627 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003628 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003629
3630 /* Rebroadcast unregister notification */
Alan Sternf07d5b92006-05-09 15:23:03 -07003631 raw_notifier_call_chain(&netdev_chain,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632 NETDEV_UNREGISTER, dev);
3633
3634 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
3635 &dev->state)) {
3636 /* We must not have linkwatch events
3637 * pending on unregister. If this
3638 * happens, we simply run the queue
3639 * unscheduled, resulting in a noop
3640 * for this device.
3641 */
3642 linkwatch_run_queue();
3643 }
3644
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003645 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003646
3647 rebroadcast_time = jiffies;
3648 }
3649
3650 msleep(250);
3651
3652 if (time_after(jiffies, warning_time + 10 * HZ)) {
3653 printk(KERN_EMERG "unregister_netdevice: "
3654 "waiting for %s to become free. Usage "
3655 "count = %d\n",
3656 dev->name, atomic_read(&dev->refcnt));
3657 warning_time = jiffies;
3658 }
3659 }
3660}
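/*
 * Example (illustrative sketch, not from the original file): a protocol
 * that caches a device reference cooperates with the scheme above by
 * dropping it from its notifier. "my_cached_dev" and "my_netdev_event"
 * are hypothetical names:
 *
 *	static struct net_device *my_cached_dev;
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UNREGISTER && dev == my_cached_dev) {
 *			my_cached_dev = NULL;
 *			dev_put(dev);	// release our reference
 *		}
 *		return NOTIFY_DONE;
 *	}
 */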
3661
3662/* The sequence is:
3663 *
3664 * rtnl_lock();
3665 * ...
3666 * register_netdevice(x1);
3667 * register_netdevice(x2);
3668 * ...
3669 * unregister_netdevice(y1);
3670 * unregister_netdevice(y2);
3671 * ...
3672 * rtnl_unlock();
3673 * free_netdev(y1);
3674 * free_netdev(y2);
3675 *
3676 * We are invoked by rtnl_unlock() after it drops the semaphore.
3677 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003678 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679 * without deadlocking with linkwatch via keventd.
3680 * 2) Since we run with the RTNL semaphore not held, we can sleep
3681 * safely in order to wait for the netdev refcnt to drop to zero.
3682 */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08003683static DEFINE_MUTEX(net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684void netdev_run_todo(void)
3685{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07003686 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687
3688	/* Need to guard against multiple CPUs getting out of order. */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08003689 mutex_lock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690
3691 /* Not safe to do outside the semaphore. We must not return
3692 * until all unregister events invoked by the local processor
3693 * have been completed (either by this todo run, or one on
3694 * another cpu).
3695 */
3696 if (list_empty(&net_todo_list))
3697 goto out;
3698
3699 /* Snapshot list, allow later requests */
3700 spin_lock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07003701 list_replace_init(&net_todo_list, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702 spin_unlock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07003703
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704 while (!list_empty(&list)) {
3705 struct net_device *dev
3706 = list_entry(list.next, struct net_device, todo_list);
3707 list_del(&dev->todo_list);
3708
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003709 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710 printk(KERN_ERR "network todo '%s' but state %d\n",
3711 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003712 dump_stack();
3713 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003715
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003716 dev->reg_state = NETREG_UNREGISTERED;
3717
3718 netdev_wait_allrefs(dev);
3719
3720 /* paranoia */
3721 BUG_ON(atomic_read(&dev->refcnt));
3722 BUG_TRAP(!dev->ip_ptr);
3723 BUG_TRAP(!dev->ip6_ptr);
3724 BUG_TRAP(!dev->dn_ptr);
3725
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003726 if (dev->destructor)
3727 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07003728
3729 /* Free network device */
3730 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731 }
3732
3733out:
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08003734 mutex_unlock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735}
3736
Rusty Russell5a1b5892007-04-28 21:04:03 -07003737static struct net_device_stats *internal_stats(struct net_device *dev)
Rusty Russellc45d2862007-03-28 14:29:08 -07003738{
Rusty Russell5a1b5892007-04-28 21:04:03 -07003739 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07003740}
3741
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003743 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744 * @sizeof_priv: size of private data to allocate space for
3745 * @name: device name format string
3746 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003747 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748 *
3749 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003750 * and performs basic initialization. Also allocates subqueue structs
3751 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003752 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003753struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
3754 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755{
3756 void *p;
3757 struct net_device *dev;
3758 int alloc_size;
3759
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07003760 BUG_ON(strlen(name) >= sizeof(dev->name));
3761
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 /* ensure 32-byte alignment of both the device and private area */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003763 alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
Patrick McHardy31ce72a2007-07-20 19:45:45 -07003764 (sizeof(struct net_device_subqueue) * (queue_count - 1))) &
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003765 ~NETDEV_ALIGN_CONST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003766 alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
3767
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07003768 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07003770 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771 return NULL;
3772 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773
3774 dev = (struct net_device *)
3775 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
3776 dev->padded = (char *)dev - (char *)p;
Eric W. Biederman6d34b1c2007-09-12 12:57:33 +02003777 dev->nd_net = &init_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003779 if (sizeof_priv) {
3780 dev->priv = ((char *)dev +
3781 ((sizeof(struct net_device) +
3782 (sizeof(struct net_device_subqueue) *
Patrick McHardy31ce72a2007-07-20 19:45:45 -07003783 (queue_count - 1)) + NETDEV_ALIGN_CONST)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003784 & ~NETDEV_ALIGN_CONST));
3785 }
3786
3787 dev->egress_subqueue_count = queue_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003788
Rusty Russell5a1b5892007-04-28 21:04:03 -07003789 dev->get_stats = internal_stats;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003790 netpoll_netdev_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791 setup(dev);
3792 strcpy(dev->name, name);
3793 return dev;
3794}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07003795EXPORT_SYMBOL(alloc_netdev_mq);
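/*
 * Example (illustrative sketch, not part of the original file): an
 * Ethernet driver allocating a device with four transmit subqueues;
 * ether_setup() is the standard Ethernet init helper, "struct my_priv"
 * is hypothetical:
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "eth%d",
 *			      ether_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */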
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796
3797/**
3798 * free_netdev - free network device
3799 * @dev: device
3800 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003801 * This function does the last stage of destroying an allocated device
3802 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803 * If this is the last reference then it will be freed.
3804 */
3805void free_netdev(struct net_device *dev)
3806{
3807#ifdef CONFIG_SYSFS
Stephen Hemminger3041a062006-05-26 13:25:24 -07003808 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809 if (dev->reg_state == NETREG_UNINITIALIZED) {
3810 kfree((char *)dev - dev->padded);
3811 return;
3812 }
3813
3814 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3815 dev->reg_state = NETREG_RELEASED;
3816
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07003817 /* will free via device release */
3818 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819#else
3820 kfree((char *)dev - dev->padded);
3821#endif
3822}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003823
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824/* Synchronize with packet receive processing. */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003825void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826{
3827 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07003828 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829}
3830
3831/**
3832 * unregister_netdevice - remove device from the kernel
3833 * @dev: device
3834 *
3835 * This function shuts down a device interface and removes it
3836 * from the kernel tables.
3838 *
3839 * Callers must hold the rtnl semaphore. You may want
3840 * unregister_netdev() instead of this.
3841 */
3842
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08003843void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845 BUG_ON(dev_boot_phase);
3846 ASSERT_RTNL();
3847
3848	/* Some devices call this without ever having registered, as part of their initialization unwind. */
3849 if (dev->reg_state == NETREG_UNINITIALIZED) {
3850 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3851 "was registered\n", dev->name, dev);
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08003852
3853 WARN_ON(1);
3854 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003855 }
3856
3857 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3858
3859 /* If device is running, close it first. */
3860 if (dev->flags & IFF_UP)
3861 dev_close(dev);
3862
3863 /* And unlink it from device chain. */
Pavel Emelianov7562f872007-05-03 15:13:45 -07003864 write_lock_bh(&dev_base_lock);
3865 list_del(&dev->dev_list);
3866 hlist_del(&dev->name_hlist);
3867 hlist_del(&dev->index_hlist);
3868 write_unlock_bh(&dev_base_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003869
3870 dev->reg_state = NETREG_UNREGISTERING;
3871
3872 synchronize_net();
3873
3874 /* Shutdown queueing discipline. */
3875 dev_shutdown(dev);
3876
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003877
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878	/* Notify protocols that we are about to destroy
3879	   this device. They should clean up all of their state.
3880 */
Alan Sternf07d5b92006-05-09 15:23:03 -07003881 raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003882
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883 /*
Patrick McHardy4417da62007-06-27 01:28:10 -07003884 * Flush the unicast and multicast chains
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885 */
Denis Cheng26cc2522007-07-18 02:12:03 -07003886 dev_addr_discard(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887
3888 if (dev->uninit)
3889 dev->uninit(dev);
3890
3891 /* Notifier chain MUST detach us from master device. */
3892 BUG_TRAP(!dev->master);
3893
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07003894 /* Remove entries from sysfs */
3895 netdev_unregister_sysfs(dev);
3896
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897 /* Finish processing unregister after unlock */
3898 net_set_todo(dev);
3899
3900 synchronize_net();
3901
3902 dev_put(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003903}
3904
3905/**
3906 * unregister_netdev - remove device from the kernel
3907 * @dev: device
3908 *
3909 * This function shuts down a device interface and removes it
3910 * from the kernel tables.
3912 *
3913 * This is just a wrapper for unregister_netdevice that takes
3914 * the rtnl semaphore. In general you want to use this and not
3915 * unregister_netdevice.
3916 */
3917void unregister_netdev(struct net_device *dev)
3918{
3919 rtnl_lock();
3920 unregister_netdevice(dev);
3921 rtnl_unlock();
3922}
3923
3924EXPORT_SYMBOL(unregister_netdev);
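/*
 * Example (illustrative sketch, not from the original file): the usual
 * teardown order in a driver's remove path; free_netdev() must come
 * after the unregister has completed:
 *
 *	unregister_netdev(dev);		// takes RTNL, schedules todo work
 *	free_netdev(dev);		// releases the device memory
 */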
3925
Linus Torvalds1da177e2005-04-16 15:20:36 -07003926static int dev_cpu_callback(struct notifier_block *nfb,
3927 unsigned long action,
3928 void *ocpu)
3929{
3930 struct sk_buff **list_skb;
3931 struct net_device **list_net;
3932 struct sk_buff *skb;
3933 unsigned int cpu, oldcpu = (unsigned long)ocpu;
3934 struct softnet_data *sd, *oldsd;
3935
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003936 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937 return NOTIFY_OK;
3938
3939 local_irq_disable();
3940 cpu = smp_processor_id();
3941 sd = &per_cpu(softnet_data, cpu);
3942 oldsd = &per_cpu(softnet_data, oldcpu);
3943
3944 /* Find end of our completion_queue. */
3945 list_skb = &sd->completion_queue;
3946 while (*list_skb)
3947 list_skb = &(*list_skb)->next;
3948 /* Append completion queue from offline CPU. */
3949 *list_skb = oldsd->completion_queue;
3950 oldsd->completion_queue = NULL;
3951
3952 /* Find end of our output_queue. */
3953 list_net = &sd->output_queue;
3954 while (*list_net)
3955 list_net = &(*list_net)->next_sched;
3956 /* Append output queue from offline CPU. */
3957 *list_net = oldsd->output_queue;
3958 oldsd->output_queue = NULL;
3959
3960 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3961 local_irq_enable();
3962
3963 /* Process offline CPU's input_pkt_queue */
3964 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
3965 netif_rx(skb);
3966
3967 return NOTIFY_OK;
3968}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003969
Chris Leechdb217332006-06-17 21:24:58 -07003970#ifdef CONFIG_NET_DMA
3971/**
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003972 * net_dma_rebalance - try to maintain one DMA channel per CPU
3973 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
3974 *
3975 * This is called when the number of channels allocated to the net_dma client
3976 * changes. The net_dma client tries to have one DMA channel per CPU.
Chris Leechdb217332006-06-17 21:24:58 -07003977 */
Dan Williamsd379b012007-07-09 11:56:42 -07003978
3979static void net_dma_rebalance(struct net_dma *net_dma)
Chris Leechdb217332006-06-17 21:24:58 -07003980{
Dan Williamsd379b012007-07-09 11:56:42 -07003981 unsigned int cpu, i, n, chan_idx;
Chris Leechdb217332006-06-17 21:24:58 -07003982 struct dma_chan *chan;
3983
Dan Williamsd379b012007-07-09 11:56:42 -07003984 if (cpus_empty(net_dma->channel_mask)) {
Chris Leechdb217332006-06-17 21:24:58 -07003985 for_each_online_cpu(cpu)
Alexey Dobriyan29bbd722006-08-02 15:02:31 -07003986 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
Chris Leechdb217332006-06-17 21:24:58 -07003987 return;
3988 }
3989
3990 i = 0;
3991 cpu = first_cpu(cpu_online_map);
3992
Dan Williamsd379b012007-07-09 11:56:42 -07003993 for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
3994 chan = net_dma->channels[chan_idx];
3995
3996 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
3997 + (i < (num_online_cpus() %
3998 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
Chris Leechdb217332006-06-17 21:24:58 -07003999
4000		while (n) {
Alexey Dobriyan29bbd722006-08-02 15:02:31 -07004001 per_cpu(softnet_data, cpu).net_dma = chan;
Chris Leechdb217332006-06-17 21:24:58 -07004002 cpu = next_cpu(cpu, cpu_online_map);
4003 n--;
4004 }
4005 i++;
4006 }
Chris Leechdb217332006-06-17 21:24:58 -07004007}
4008
4009/**
4010 * netdev_dma_event - event callback for the net_dma_client
4011 * @client: should always be net_dma_client
Randy Dunlapf4b8ea72006-06-22 16:00:11 -07004012 * @chan: DMA channel for the event
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004013 * @state: DMA state to be handled
Chris Leechdb217332006-06-17 21:24:58 -07004014 */
Dan Williamsd379b012007-07-09 11:56:42 -07004015static enum dma_state_client
4016netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4017 enum dma_state state)
Chris Leechdb217332006-06-17 21:24:58 -07004018{
Dan Williamsd379b012007-07-09 11:56:42 -07004019 int i, found = 0, pos = -1;
4020 struct net_dma *net_dma =
4021 container_of(client, struct net_dma, client);
4022 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4023
4024 spin_lock(&net_dma->lock);
4025 switch (state) {
4026 case DMA_RESOURCE_AVAILABLE:
4027 for (i = 0; i < NR_CPUS; i++)
4028 if (net_dma->channels[i] == chan) {
4029 found = 1;
4030 break;
4031 } else if (net_dma->channels[i] == NULL && pos < 0)
4032 pos = i;
4033
4034 if (!found && pos >= 0) {
4035 ack = DMA_ACK;
4036 net_dma->channels[pos] = chan;
4037 cpu_set(pos, net_dma->channel_mask);
4038 net_dma_rebalance(net_dma);
4039 }
Chris Leechdb217332006-06-17 21:24:58 -07004040 break;
4041 case DMA_RESOURCE_REMOVED:
Dan Williamsd379b012007-07-09 11:56:42 -07004042 for (i = 0; i < NR_CPUS; i++)
4043 if (net_dma->channels[i] == chan) {
4044 found = 1;
4045 pos = i;
4046 break;
4047 }
4048
4049 if (found) {
4050 ack = DMA_ACK;
4051 cpu_clear(pos, net_dma->channel_mask);
4052 net_dma->channels[i] = NULL;
4053 net_dma_rebalance(net_dma);
4054 }
Chris Leechdb217332006-06-17 21:24:58 -07004055 break;
4056 default:
4057 break;
4058 }
Dan Williamsd379b012007-07-09 11:56:42 -07004059 spin_unlock(&net_dma->lock);
4060
4061 return ack;
Chris Leechdb217332006-06-17 21:24:58 -07004062}
4063
4064/**
4065 * netdev_dma_register - register the networking subsystem as a DMA client
4066 */
4067static int __init netdev_dma_register(void)
4068{
Dan Williamsd379b012007-07-09 11:56:42 -07004069 spin_lock_init(&net_dma.lock);
4070 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4071 dma_async_client_register(&net_dma.client);
4072 dma_async_client_chan_request(&net_dma.client);
Chris Leechdb217332006-06-17 21:24:58 -07004073 return 0;
4074}
4075
4076#else
4077static int __init netdev_dma_register(void) { return -ENODEV; }
4078#endif /* CONFIG_NET_DMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079
Herbert Xu7f353bf2007-08-10 15:47:58 -07004080/**
4081 * netdev_compute_feature - compute conjunction of two feature sets
4082 * @all: first feature set
4083 * @one: second feature set
4084 *
4085 * Computes a new feature set after adding a device with feature set
4086 * @one to the master device with current feature set @all. Returns
4087 * the new feature set.
4088 */
4089int netdev_compute_features(unsigned long all, unsigned long one)
4090{
4091 /* if device needs checksumming, downgrade to hw checksumming */
4092 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4093 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4094
4095 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4096 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4097 all ^= NETIF_F_HW_CSUM
4098 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4099
4100 if (one & NETIF_F_GSO)
4101 one |= NETIF_F_GSO_SOFTWARE;
4102 one |= NETIF_F_GSO;
4103
4104 /* If even one device supports robust GSO, enable it for all. */
4105 if (one & NETIF_F_GSO_ROBUST)
4106 all |= NETIF_F_GSO_ROBUST;
4107
4108 all &= one | NETIF_F_LLTX;
4109
4110 if (!(all & NETIF_F_ALL_CSUM))
4111 all &= ~NETIF_F_SG;
4112 if (!(all & NETIF_F_SG))
4113 all &= ~NETIF_F_GSO_MASK;
4114
4115 return all;
4116}
4117EXPORT_SYMBOL(netdev_compute_features);
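/*
 * Example (illustrative sketch, not from the original file): a master
 * device such as a bond could derive its feature set by folding each
 * slave's features into a running set; "master", "slave0" etc. are
 * hypothetical:
 *
 *	unsigned long features = slave0->features;
 *
 *	features = netdev_compute_features(features, slave1->features);
 *	features = netdev_compute_features(features, slave2->features);
 *	master->features = features;
 */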
4118
Eric W. Biederman881d9662007-09-17 11:56:21 -07004119/* Initialize per network namespace state */
4120static int netdev_init(struct net *net)
4121{
4122 int i;
4123 INIT_LIST_HEAD(&net->dev_base_head);
4124 rwlock_init(&dev_base_lock);
4125
4126 net->dev_name_head = kmalloc(
4127 sizeof(*net->dev_name_head)*NETDEV_HASHENTRIES, GFP_KERNEL);
4128 if (!net->dev_name_head)
4129 return -ENOMEM;
4130
4131 net->dev_index_head = kmalloc(
4132 sizeof(*net->dev_index_head)*NETDEV_HASHENTRIES, GFP_KERNEL);
4133 if (!net->dev_index_head) {
4134 kfree(net->dev_name_head);
4135 return -ENOMEM;
4136 }
4137
4138 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4139 INIT_HLIST_HEAD(&net->dev_name_head[i]);
4140
4141 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4142 INIT_HLIST_HEAD(&net->dev_index_head[i]);
4143
4144 return 0;
4145}
4146
4147static void netdev_exit(struct net *net)
4148{
4149 kfree(net->dev_name_head);
4150 kfree(net->dev_index_head);
4151}
4152
4153static struct pernet_operations netdev_net_ops = {
4154 .init = netdev_init,
4155 .exit = netdev_exit,
4156};
4157
Linus Torvalds1da177e2005-04-16 15:20:36 -07004158/*
4159 * Initialize the DEV module. At boot time this walks the device list and
4160 * unhooks any devices that fail to initialise (normally hardware not
4161 * present) and leaves us with a valid list of present and active devices.
4162 *
4163 */
4164
4165/*
4166 * This is called single threaded during boot, so no need
4167 * to take the rtnl semaphore.
4168 */
4169static int __init net_dev_init(void)
4170{
4171 int i, rc = -ENOMEM;
4172
4173 BUG_ON(!dev_boot_phase);
4174
Linus Torvalds1da177e2005-04-16 15:20:36 -07004175 if (dev_proc_init())
4176 goto out;
4177
4178 if (netdev_sysfs_init())
4179 goto out;
4180
4181 INIT_LIST_HEAD(&ptype_all);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004182 for (i = 0; i < 16; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183 INIT_LIST_HEAD(&ptype_base[i]);
4184
Eric W. Biederman881d9662007-09-17 11:56:21 -07004185 if (register_pernet_subsys(&netdev_net_ops))
4186 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187
4188 /*
4189 * Initialise the packet receive queues.
4190 */
4191
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07004192 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193 struct softnet_data *queue;
4194
4195 queue = &per_cpu(softnet_data, i);
4196 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197 queue->completion_queue = NULL;
4198 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004199
4200 queue->backlog.poll = process_backlog;
4201 queue->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202 }
4203
Chris Leechdb217332006-06-17 21:24:58 -07004204 netdev_dma_register();
4205
Linus Torvalds1da177e2005-04-16 15:20:36 -07004206 dev_boot_phase = 0;
4207
4208 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
4209 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
4210
4211 hotcpu_notifier(dev_cpu_callback, 0);
4212 dst_init();
4213 dev_mcast_init();
4214 rc = 0;
4215out:
4216 return rc;
4217}
4218
4219subsys_initcall(net_dev_init);
4220
4221EXPORT_SYMBOL(__dev_get_by_index);
4222EXPORT_SYMBOL(__dev_get_by_name);
4223EXPORT_SYMBOL(__dev_remove_pack);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08004224EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225EXPORT_SYMBOL(dev_add_pack);
4226EXPORT_SYMBOL(dev_alloc_name);
4227EXPORT_SYMBOL(dev_close);
4228EXPORT_SYMBOL(dev_get_by_flags);
4229EXPORT_SYMBOL(dev_get_by_index);
4230EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231EXPORT_SYMBOL(dev_open);
4232EXPORT_SYMBOL(dev_queue_xmit);
4233EXPORT_SYMBOL(dev_remove_pack);
4234EXPORT_SYMBOL(dev_set_allmulti);
4235EXPORT_SYMBOL(dev_set_promiscuity);
4236EXPORT_SYMBOL(dev_change_flags);
4237EXPORT_SYMBOL(dev_set_mtu);
4238EXPORT_SYMBOL(dev_set_mac_address);
4239EXPORT_SYMBOL(free_netdev);
4240EXPORT_SYMBOL(netdev_boot_setup_check);
4241EXPORT_SYMBOL(netdev_set_master);
4242EXPORT_SYMBOL(netdev_state_change);
4243EXPORT_SYMBOL(netif_receive_skb);
4244EXPORT_SYMBOL(netif_rx);
4245EXPORT_SYMBOL(register_gifconf);
4246EXPORT_SYMBOL(register_netdevice);
4247EXPORT_SYMBOL(register_netdevice_notifier);
4248EXPORT_SYMBOL(skb_checksum_help);
4249EXPORT_SYMBOL(synchronize_net);
4250EXPORT_SYMBOL(unregister_netdevice);
4251EXPORT_SYMBOL(unregister_netdevice_notifier);
4252EXPORT_SYMBOL(net_enable_timestamp);
4253EXPORT_SYMBOL(net_disable_timestamp);
4254EXPORT_SYMBOL(dev_get_flags);
4255
4256#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4257EXPORT_SYMBOL(br_handle_frame_hook);
4258EXPORT_SYMBOL(br_fdb_get_hook);
4259EXPORT_SYMBOL(br_fdb_put_hook);
4260#endif
4261
4262#ifdef CONFIG_KMOD
4263EXPORT_SYMBOL(dev_load);
4264#endif
4265
4266EXPORT_PER_CPU_SYMBOL(softnet_data);