/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include "net-sysfs.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client client;
	spinlock_t lock;
	cpumask_t channel_mask;
	struct dma_chan **channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

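/*
 * Illustrative sketch (not part of the original file): the reader-side
 * discipline described above.  A pure reader walks the device list
 * under dev_base_lock only; a writer must hold the RTNL as well.  The
 * loop body below is hypothetical.
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		inspect_device_readonly(dev);	// hypothetical helper
 *	read_unlock(&dev_base_lock);
 */
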
#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits dev->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	is first on the list, it has no way to sense that the packet is
 *	cloned and should be copied-on-write; it will change the data and
 *	subsequent readers will see a broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

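/*
 * Illustrative sketch (not part of the original file): how a protocol
 * module typically registers a tap with dev_add_pack().  The handler
 * and the static packet_type below are hypothetical.
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type example_pt = {
 *		.type = __constant_htons(ETH_P_ALL),
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_pt);	// usually from module_init()
 */
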
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

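/*
 * Illustrative note (not part of the original file): module teardown
 * ordinarily uses the sleeping variant, so the handler cannot still be
 * running on another CPU once the module's data is freed:
 *
 *	dev_remove_pack(&example_pt);	// sleeps via synchronize_net()
 *	// example_pt and example_rcv() may be freed/unloaded now
 */
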
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves the settings configured at boot time for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

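/*
 * Illustrative note (not part of the original file): the handler above
 * parses the classic "netdev=" boot option, e.g. on the kernel command
 * line:
 *
 *	netdev=5,0x300,eth0
 *
 * get_options() peels off up to four leading integers
 * (irq, base_addr, mem_start, mem_end) and leaves the remaining device
 * name in str, which is then handed to netdev_boot_setup_add().
 */
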
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

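/*
 * Illustrative sketch (not part of the original file): the usual
 * refcounted lookup with the locked variant.  The interface name is
 * hypothetical.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		// ... use dev ...
 *		dev_put(dev);	// drop the reference taken for us
 *	}
 */
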
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

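/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants a kernel-chosen unit number passes a format string; "eth%d"
 * below is the conventional example.
 *
 *	ret = dev_alloc_name(dev, "eth%d");
 *	if (ret < 0)
 *		goto err;		// e.g. -EINVAL or -ENFILE
 *	// dev->name is now something like "eth3"
 */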

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

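/*
 * Illustrative note (not part of the original file, and hedged): this
 * is what lets an ioctl path bring in a driver on first reference to a
 * known interface name; the module is expected to declare the interface
 * name as a module alias so request_module() can resolve it, e.g.
 *
 *	dev_load(net, "tunl0");		// hypothetical caller context
 */
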
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


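/*
 * Illustrative note (not part of the original file): a forwarding setup
 * would call this when an interface starts forwarding traffic, e.g.
 *
 *	rtnl_lock();
 *	dev_disable_lro(dev);	// LRO-merged frames must not be forwarded
 *	rtnl_unlock();
 */
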
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

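/*
 * Illustrative sketch (not part of the original file): a minimal client
 * of the chain above.  The callback name is hypothetical; in this
 * kernel the (void *) argument is the struct net_device itself.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 */
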
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


void __netif_schedule(struct netdev_queue *txq)
{
	struct net_device *dev = txq->dev;

	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		txq->next_sched = sd->output_queue;
		sd->output_queue = txq;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

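/*
 * Illustrative sketch (not part of the original file): the usual
 * pairing in a driver's suspend/resume handlers.  The function names
 * are hypothetical.
 *
 *	static int example_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);	// stop queue, mark absent
 *		// ... save state, power down hardware ...
 *		return 0;
 *	}
 *
 *	static int example_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		// ... power up, restore state ...
 *		netif_device_attach(dev);	// mark present, wake queue
 *		return 0;
 *	}
 */
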
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001422/*
1423 * Invalidate hardware checksum when packet is to be mangled, and
1424 * complete checksum manually on outgoing path.
1425 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001426int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427{
Al Virod3bc23e2006-11-14 21:24:49 -08001428 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001429 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430
Patrick McHardy84fa7932006-08-29 16:44:56 -07001431 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001432 goto out_set_summed;
1433
1434 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001435 /* Let GSO fix up the checksum. */
1436 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 }
1438
Herbert Xua0308472007-10-15 01:47:15 -07001439 offset = skb->csum_start - skb_headroom(skb);
1440 BUG_ON(offset >= skb_headlen(skb));
1441 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1442
1443 offset += skb->csum_offset;
1444 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1445
1446 if (skb_cloned(skb) &&
1447 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1449 if (ret)
1450 goto out;
1451 }
1452
Herbert Xua0308472007-10-15 01:47:15 -07001453 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001454out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001456out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 return ret;
1458}
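
/*
 * Example: a sketch of the typical calling pattern. A path that must
 * mangle a packet the hardware was going to checksum first resolves
 * the checksum in software:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    skb_checksum_help(skb))
 *		goto drop;	// could not complete the checksum
 *	// skb->ip_summed is now CHECKSUM_NONE; the payload may be edited
 */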
1459
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001460/**
1461 * skb_gso_segment - Perform segmentation on skb.
1462 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001463 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001464 *
1465 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001466 *
1467 * It may return NULL if the skb requires no segmentation. This is
1468 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001469 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001470struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001471{
1472 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1473 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001474 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001475 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001476
1477 BUG_ON(skb_shinfo(skb)->frag_list);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001478
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001479 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001480 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001481 __skb_pull(skb, skb->mac_len);
1482
Herbert Xuf9d106a2007-04-23 22:36:13 -07001483 if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001484 if (skb_header_cloned(skb) &&
1485 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1486 return ERR_PTR(err);
1487 }
1488
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001489 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001490 list_for_each_entry_rcu(ptype,
1491 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001492 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001493 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001494 err = ptype->gso_send_check(skb);
1495 segs = ERR_PTR(err);
1496 if (err || skb_gso_ok(skb, features))
1497 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001498 __skb_push(skb, (skb->data -
1499 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001500 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001501 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001502 break;
1503 }
1504 }
1505 rcu_read_unlock();
1506
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001507 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001508
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001509 return segs;
1510}
1511
1512EXPORT_SYMBOL(skb_gso_segment);
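
/*
 * Example: a sketch of how a caller consumes the returned list
 * (compare dev_gso_segment() below); foo_xmit_one() is hypothetical.
 *
 *	struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);		// segmentation failed
 *	if (!segs)
 *		return foo_xmit_one(skb);	// header check only, send as-is
 *	while (segs) {
 *		struct sk_buff *nskb = segs;
 *
 *		segs = nskb->next;
 *		nskb->next = NULL;
 *		foo_xmit_one(nskb);		// transmit each segment separately
 *	}
 *	kfree_skb(skb);				// the original skb is no longer needed
 */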
1513
Herbert Xufb286bb2005-11-10 13:01:24 -08001514/* Take action when hardware reception checksum errors are detected. */
1515#ifdef CONFIG_BUG
1516void netdev_rx_csum_fault(struct net_device *dev)
1517{
1518 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001519 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001520 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001521 dump_stack();
1522 }
1523}
1524EXPORT_SYMBOL(netdev_rx_csum_fault);
1525#endif
1526
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527/* Actually, we should eliminate this check as soon as we know that:
 1528 * 1. An IOMMU is present and allows mapping all the memory.
1529 * 2. No high memory really exists on this machine.
1530 */
1531
1532static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1533{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001534#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 int i;
1536
1537 if (dev->features & NETIF_F_HIGHDMA)
1538 return 0;
1539
1540 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1541 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1542 return 1;
1543
Herbert Xu3d3a8532006-06-27 13:33:10 -07001544#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 return 0;
1546}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001548struct dev_gso_cb {
1549 void (*destructor)(struct sk_buff *skb);
1550};
1551
1552#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1553
1554static void dev_gso_skb_destructor(struct sk_buff *skb)
1555{
1556 struct dev_gso_cb *cb;
1557
1558 do {
1559 struct sk_buff *nskb = skb->next;
1560
1561 skb->next = nskb->next;
1562 nskb->next = NULL;
1563 kfree_skb(nskb);
1564 } while (skb->next);
1565
1566 cb = DEV_GSO_CB(skb);
1567 if (cb->destructor)
1568 cb->destructor(skb);
1569}
1570
1571/**
1572 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1573 * @skb: buffer to segment
1574 *
1575 * This function segments the given skb and stores the list of segments
1576 * in skb->next.
1577 */
1578static int dev_gso_segment(struct sk_buff *skb)
1579{
1580 struct net_device *dev = skb->dev;
1581 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001582 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1583 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001584
Herbert Xu576a30e2006-06-27 13:22:38 -07001585 segs = skb_gso_segment(skb, features);
1586
1587 /* Verifying header integrity only. */
1588 if (!segs)
1589 return 0;
1590
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001591 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001592 return PTR_ERR(segs);
1593
1594 skb->next = segs;
1595 DEV_GSO_CB(skb)->destructor = skb->destructor;
1596 skb->destructor = dev_gso_skb_destructor;
1597
1598 return 0;
1599}
1600
1601int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1602{
1603 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001604 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001605 dev_queue_xmit_nit(skb, dev);
1606
Herbert Xu576a30e2006-06-27 13:22:38 -07001607 if (netif_needs_gso(dev, skb)) {
1608 if (unlikely(dev_gso_segment(skb)))
1609 goto out_kfree_skb;
1610 if (skb->next)
1611 goto gso;
1612 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001613
Herbert Xu576a30e2006-06-27 13:22:38 -07001614 return dev->hard_start_xmit(skb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001615 }
1616
Herbert Xu576a30e2006-06-27 13:22:38 -07001617gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001618 do {
1619 struct sk_buff *nskb = skb->next;
1620 int rc;
1621
1622 skb->next = nskb->next;
1623 nskb->next = NULL;
1624 rc = dev->hard_start_xmit(nskb, dev);
1625 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001626 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001627 skb->next = nskb;
1628 return rc;
1629 }
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001630 if (unlikely((netif_queue_stopped(dev) ||
Pavel Emelyanov668f8952007-10-21 17:01:56 -07001631 netif_subqueue_stopped(dev, skb)) &&
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001632 skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001633 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001634 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001635
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001636 skb->destructor = DEV_GSO_CB(skb)->destructor;
1637
1638out_kfree_skb:
1639 kfree_skb(skb);
1640 return 0;
1641}
1642
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643/**
1644 * dev_queue_xmit - transmit a buffer
1645 * @skb: buffer to transmit
1646 *
1647 * Queue a buffer for transmission to a network device. The caller must
1648 * have set the device and priority and built the buffer before calling
1649 * this function. The function can be called from an interrupt.
1650 *
1651 * A negative errno code is returned on a failure. A success does not
1652 * guarantee the frame will be transmitted as it may be dropped due
1653 * to congestion or traffic shaping.
Ben Greearaf191362005-04-24 20:12:36 -07001654 *
1655 * -----------------------------------------------------------------------------------
1656 * I notice this method can also return errors from the queue disciplines,
1657 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1658 * be positive.
1659 *
1660 * Regardless of the return value, the skb is consumed, so it is currently
1661 * difficult to retry a send to this method. (You can bump the ref count
1662 * before sending to hold a reference for retry if you are careful.)
1663 *
1664 * When calling this method, interrupts MUST be enabled. This is because
1665 * the BH enable code must have IRQs enabled so that it will not deadlock.
1666 * --BLG
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 */
1668
1669int dev_queue_xmit(struct sk_buff *skb)
1670{
1671 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001672 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 struct Qdisc *q;
1674 int rc = -ENOMEM;
1675
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001676 /* GSO will handle the following emulations directly. */
1677 if (netif_needs_gso(dev, skb))
1678 goto gso;
1679
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 if (skb_shinfo(skb)->frag_list &&
1681 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001682 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 goto out_kfree_skb;
1684
1685 /* Fragmented skb is linearized if device does not support SG,
 1686 * or if at least one of the fragments is in highmem and the device
1687 * does not support DMA from it.
1688 */
1689 if (skb_shinfo(skb)->nr_frags &&
1690 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001691 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 goto out_kfree_skb;
1693
1694 /* If packet is not checksummed and device does not support
1695 * checksumming for this protocol, complete checksumming here.
1696 */
Herbert Xu663ead32007-04-09 11:59:07 -07001697 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1698 skb_set_transport_header(skb, skb->csum_start -
1699 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001700 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1701 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001702 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001704gso:
David S. Millerdc2b4842008-07-08 17:18:23 -07001705 txq = &dev->tx_queue;
1706 spin_lock_prefetch(&txq->lock);
Eric Dumazet2d7ceec2005-09-27 15:22:58 -07001707
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001708 /* Disable soft irqs for various locks below. Also
1709 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001711 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712
David S. Millerdc2b4842008-07-08 17:18:23 -07001713 /* Updates of qdisc are serialized by queue->lock.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001714 * The struct Qdisc which is pointed to by qdisc is now an
 1715 * RCU structure - it may be accessed without acquiring
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 * a lock (but the structure may be stale.) The freeing of the
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001717 * qdisc will be deferred until it's known that there are no
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 * more references to it.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001719 *
1720 * If the qdisc has an enqueue function, we still need to
David S. Millerdc2b4842008-07-08 17:18:23 -07001721 * hold the queue->lock before calling it, since queue->lock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 * also serializes access to the device queue.
1723 */
1724
David S. Millerb0e1e642008-07-08 17:42:10 -07001725 q = rcu_dereference(txq->qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726#ifdef CONFIG_NET_CLS_ACT
1727 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1728#endif
1729 if (q->enqueue) {
1730 /* Grab device queue */
David S. Millerdc2b4842008-07-08 17:18:23 -07001731 spin_lock(&txq->lock);
David S. Millerb0e1e642008-07-08 17:42:10 -07001732 q = txq->qdisc;
Patrick McHardy85670cc2006-09-27 16:45:45 -07001733 if (q->enqueue) {
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001734 /* reset queue_mapping to zero */
Pavel Emelyanovdfa40912007-10-21 16:57:55 -07001735 skb_set_queue_mapping(skb, 0);
Patrick McHardy85670cc2006-09-27 16:45:45 -07001736 rc = q->enqueue(skb, q);
1737 qdisc_run(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07001738 spin_unlock(&txq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739
Patrick McHardy85670cc2006-09-27 16:45:45 -07001740 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1741 goto out;
1742 }
David S. Millerdc2b4842008-07-08 17:18:23 -07001743 spin_unlock(&txq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 }
1745
1746 /* The device has no queue. Common case for software devices:
 1747 loopback, all sorts of tunnels...
1748
Herbert Xu932ff272006-06-09 12:20:56 -07001749 Really, it is unlikely that netif_tx_lock protection is necessary
 1750 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 counters.)
 1752 However, it is possible that they rely on the protection
 1753 we provide here.
1754
 1755 Check this and shoot the lock. It is not prone to deadlocks.
 1756 Either shoot the noqueue qdisc; it is even simpler 8)
1757 */
1758 if (dev->flags & IFF_UP) {
1759 int cpu = smp_processor_id(); /* ok because BHs are off */
1760
1761 if (dev->xmit_lock_owner != cpu) {
1762
1763 HARD_TX_LOCK(dev, cpu);
1764
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001765 if (!netif_queue_stopped(dev) &&
Pavel Emelyanov668f8952007-10-21 17:01:56 -07001766 !netif_subqueue_stopped(dev, skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 rc = 0;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001768 if (!dev_hard_start_xmit(skb, dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 HARD_TX_UNLOCK(dev);
1770 goto out;
1771 }
1772 }
1773 HARD_TX_UNLOCK(dev);
1774 if (net_ratelimit())
1775 printk(KERN_CRIT "Virtual device %s asks to "
1776 "queue packet!\n", dev->name);
1777 } else {
1778 /* Recursion is detected! It is possible,
1779 * unfortunately */
1780 if (net_ratelimit())
1781 printk(KERN_CRIT "Dead loop on virtual device "
1782 "%s, fix it urgently!\n", dev->name);
1783 }
1784 }
1785
1786 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001787 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
1789out_kfree_skb:
1790 kfree_skb(skb);
1791 return rc;
1792out:
Herbert Xud4828d82006-06-22 02:28:18 -07001793 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 return rc;
1795}
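
/*
 * Example: a minimal sketch of a caller. ETH_P_FOO and the payload
 * setup are placeholders; a real sender also builds the link-layer
 * header (e.g. via dev_hard_header()).
 *
 *	struct sk_buff *skb;
 *	int rc;
 *
 *	skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	memcpy(skb_put(skb, len), data, len);
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_FOO);	// placeholder protocol
 *	rc = dev_queue_xmit(skb);		// consumes the skb, even on error
 *	if (rc)
 *		return rc;			// do not free skb here
 */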
1796
1797
1798/*=======================================================================
1799 Receiver routines
1800 =======================================================================*/
1801
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001802int netdev_max_backlog __read_mostly = 1000;
1803int netdev_budget __read_mostly = 300;
1804int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805
1806DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1807
1808
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809/**
1810 * netif_rx - post buffer to the network code
1811 * @skb: buffer to post
1812 *
1813 * This function receives a packet from a device driver and queues it for
1814 * the upper (protocol) levels to process. It always succeeds. The buffer
1815 * may be dropped during processing for congestion control or by the
1816 * protocol layers.
1817 *
1818 * return values:
1819 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 * NET_RX_DROP (packet was dropped)
1821 *
1822 */
1823
1824int netif_rx(struct sk_buff *skb)
1825{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 struct softnet_data *queue;
1827 unsigned long flags;
1828
1829 /* if netpoll wants it, pretend we never saw it */
1830 if (netpoll_rx(skb))
1831 return NET_RX_DROP;
1832
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001833 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001834 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835
1836 /*
 1837 * The code is rearranged so that the path is the shortest
 1838 * when the CPU is congested but still operating.
1839 */
1840 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 queue = &__get_cpu_var(softnet_data);
1842
1843 __get_cpu_var(netdev_rx_stat).total++;
1844 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1845 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846enqueue:
1847 dev_hold(skb->dev);
1848 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001850 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 }
1852
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001853 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 goto enqueue;
1855 }
1856
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 __get_cpu_var(netdev_rx_stat).dropped++;
1858 local_irq_restore(flags);
1859
1860 kfree_skb(skb);
1861 return NET_RX_DROP;
1862}
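
/*
 * Example: the classic non-NAPI receive path in a driver interrupt
 * handler (a sketch; pkt_len and rx_buf stand in for device specifics).
 *
 *	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		goto drop;
 *	skb_reserve(skb, NET_IP_ALIGN);		// align the IP header
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);				// queue for softirq processing
 */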
1863
1864int netif_rx_ni(struct sk_buff *skb)
1865{
1866 int err;
1867
1868 preempt_disable();
1869 err = netif_rx(skb);
1870 if (local_softirq_pending())
1871 do_softirq();
1872 preempt_enable();
1873
1874 return err;
1875}
1876
1877EXPORT_SYMBOL(netif_rx_ni);
1878
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001879static inline struct net_device *skb_bond(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880{
1881 struct net_device *dev = skb->dev;
1882
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001883 if (dev->master) {
David S. Miller7ea49ed2006-08-14 17:08:36 -07001884 if (skb_bond_should_drop(skb)) {
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001885 kfree_skb(skb);
1886 return NULL;
1887 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 skb->dev = dev->master;
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001889 }
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001890
1891 return dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892}
1893
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001894
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895static void net_tx_action(struct softirq_action *h)
1896{
1897 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1898
1899 if (sd->completion_queue) {
1900 struct sk_buff *clist;
1901
1902 local_irq_disable();
1903 clist = sd->completion_queue;
1904 sd->completion_queue = NULL;
1905 local_irq_enable();
1906
1907 while (clist) {
1908 struct sk_buff *skb = clist;
1909 clist = clist->next;
1910
1911 BUG_TRAP(!atomic_read(&skb->users));
1912 __kfree_skb(skb);
1913 }
1914 }
1915
1916 if (sd->output_queue) {
David S. Milleree609cb2008-07-08 22:58:37 -07001917 struct netdev_queue *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918
1919 local_irq_disable();
1920 head = sd->output_queue;
1921 sd->output_queue = NULL;
1922 local_irq_enable();
1923
1924 while (head) {
David S. Milleree609cb2008-07-08 22:58:37 -07001925 struct netdev_queue *txq = head;
1926 struct net_device *dev = txq->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 head = head->next_sched;
1928
1929 smp_mb__before_clear_bit();
1930 clear_bit(__LINK_STATE_SCHED, &dev->state);
1931
David S. Millerdc2b4842008-07-08 17:18:23 -07001932 if (spin_trylock(&txq->lock)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 qdisc_run(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07001934 spin_unlock(&txq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 } else {
David S. Miller86d804e2008-07-08 23:11:25 -07001936 netif_schedule_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 }
1938 }
1939 }
1940}
1941
Stephen Hemminger6f05f622007-03-08 20:46:03 -08001942static inline int deliver_skb(struct sk_buff *skb,
1943 struct packet_type *pt_prev,
1944 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945{
1946 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001947 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948}
1949
1950#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Stephen Hemminger6229e362007-03-21 13:38:47 -07001951/* These hooks defined here for ATM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952struct net_bridge;
1953struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
1954 unsigned char *addr);
Stephen Hemminger6229e362007-03-21 13:38:47 -07001955void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956
Stephen Hemminger6229e362007-03-21 13:38:47 -07001957/*
1958 * If bridge module is loaded call bridging hook.
1959 * returns NULL if packet was consumed.
1960 */
1961struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
1962 struct sk_buff *skb) __read_mostly;
1963static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
1964 struct packet_type **pt_prev, int *ret,
1965 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966{
1967 struct net_bridge_port *port;
1968
Stephen Hemminger6229e362007-03-21 13:38:47 -07001969 if (skb->pkt_type == PACKET_LOOPBACK ||
1970 (port = rcu_dereference(skb->dev->br_port)) == NULL)
1971 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972
1973 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07001974 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001976 }
1977
Stephen Hemminger6229e362007-03-21 13:38:47 -07001978 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979}
1980#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07001981#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982#endif
1983
Patrick McHardyb863ceb2007-07-14 18:55:06 -07001984#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
1985struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
1986EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
1987
1988static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
1989 struct packet_type **pt_prev,
1990 int *ret,
1991 struct net_device *orig_dev)
1992{
1993 if (skb->dev->macvlan_port == NULL)
1994 return skb;
1995
1996 if (*pt_prev) {
1997 *ret = deliver_skb(skb, *pt_prev, orig_dev);
1998 *pt_prev = NULL;
1999 }
2000 return macvlan_handle_frame_hook(skb);
2001}
2002#else
2003#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2004#endif
2005
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006#ifdef CONFIG_NET_CLS_ACT
 2007/* TODO: Maybe we should just force sch_ingress to be compiled in
 2008 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
 2009 * instructions (a compare and 2 extra stores) right now if we don't
 2010 * have it on but have CONFIG_NET_CLS_ACT
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002011 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 * the ingress scheduler, you just can't add policies on ingress.
2013 *
2014 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002015static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002018 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002019 struct netdev_queue *rxq;
2020 int result = TC_ACT_OK;
2021 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002022
Herbert Xuf697c3e2007-10-14 00:38:47 -07002023 if (MAX_RED_LOOP < ttl++) {
2024 printk(KERN_WARNING
2025 "Redir loop detected Dropping packet (%d->%d)\n",
2026 skb->iif, dev->ifindex);
2027 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 }
2029
Herbert Xuf697c3e2007-10-14 00:38:47 -07002030 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2031 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2032
David S. Miller555353c2008-07-08 17:33:13 -07002033 rxq = &dev->rx_queue;
2034
2035 spin_lock(&rxq->lock);
David S. Miller816f3252008-07-08 22:49:00 -07002036 if ((q = rxq->qdisc) != NULL)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002037 result = q->enqueue(skb, q);
David S. Miller555353c2008-07-08 17:33:13 -07002038 spin_unlock(&rxq->lock);
Herbert Xuf697c3e2007-10-14 00:38:47 -07002039
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 return result;
2041}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002042
2043static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2044 struct packet_type **pt_prev,
2045 int *ret, struct net_device *orig_dev)
2046{
David S. Miller816f3252008-07-08 22:49:00 -07002047 if (!skb->dev->rx_queue.qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002048 goto out;
2049
2050 if (*pt_prev) {
2051 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2052 *pt_prev = NULL;
2053 } else {
2054 /* Huh? Why does turning on AF_PACKET affect this? */
2055 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2056 }
2057
2058 switch (ing_filter(skb)) {
2059 case TC_ACT_SHOT:
2060 case TC_ACT_STOLEN:
2061 kfree_skb(skb);
2062 return NULL;
2063 }
2064
2065out:
2066 skb->tc_verd = 0;
2067 return skb;
2068}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069#endif
2070
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002071/**
2072 * netif_receive_skb - process receive buffer from network
2073 * @skb: buffer to process
2074 *
2075 * netif_receive_skb() is the main receive data processing function.
2076 * It always succeeds. The buffer may be dropped during processing
2077 * for congestion control or by the protocol layers.
2078 *
2079 * This function may only be called from softirq context and interrupts
2080 * should be enabled.
2081 *
2082 * Return values (usually ignored):
2083 * NET_RX_SUCCESS: no congestion
2084 * NET_RX_DROP: packet was dropped
2085 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086int netif_receive_skb(struct sk_buff *skb)
2087{
2088 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002089 struct net_device *orig_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002091 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092
2093 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002094 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 return NET_RX_DROP;
2096
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002097 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002098 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099
Patrick McHardyc01003c2007-03-29 11:46:52 -07002100 if (!skb->iif)
2101 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002102
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002103 orig_dev = skb_bond(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002105 if (!orig_dev)
2106 return NET_RX_DROP;
2107
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 __get_cpu_var(netdev_rx_stat).total++;
2109
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002110 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002111 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002112 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
2114 pt_prev = NULL;
2115
2116 rcu_read_lock();
2117
Eric W. Biedermanb9f75f42008-06-20 22:16:51 -07002118 /* Don't receive packets in an exiting network namespace */
2119 if (!net_alive(dev_net(skb->dev)))
2120 goto out;
2121
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122#ifdef CONFIG_NET_CLS_ACT
2123 if (skb->tc_verd & TC_NCLS) {
2124 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2125 goto ncls;
2126 }
2127#endif
2128
2129 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2130 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002131 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002132 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 pt_prev = ptype;
2134 }
2135 }
2136
2137#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002138 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2139 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141ncls:
2142#endif
2143
Stephen Hemminger6229e362007-03-21 13:38:47 -07002144 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2145 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002147 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2148 if (!skb)
2149 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150
2151 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002152 list_for_each_entry_rcu(ptype,
2153 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 if (ptype->type == type &&
2155 (!ptype->dev || ptype->dev == skb->dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002156 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002157 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 pt_prev = ptype;
2159 }
2160 }
2161
2162 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002163 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 } else {
2165 kfree_skb(skb);
 2166 /* Jamal, now you will not be able to escape explaining
 2167 * to me how you were going to use this. :-)
2168 */
2169 ret = NET_RX_DROP;
2170 }
2171
2172out:
2173 rcu_read_unlock();
2174 return ret;
2175}
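
/*
 * Example: a sketch of a NAPI driver's poll routine, which is where
 * netif_receive_skb() is normally called from (the foo_* names are
 * hypothetical).
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work = 0;
 *
 *		while (work < budget && foo_rx_ready(priv)) {
 *			struct sk_buff *skb = foo_build_rx_skb(priv);
 *
 *			skb->protocol = eth_type_trans(skb, priv->dev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget) {
 *			netif_rx_complete(priv->dev, napi);	// done for now
 *			foo_enable_rx_irq(priv);		// re-arm RX interrupt
 *		}
 *		return work;
 *	}
 */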
2176
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002177static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178{
2179 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2181 unsigned long start_time = jiffies;
2182
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002183 napi->weight = weight_p;
2184 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 struct sk_buff *skb;
2186 struct net_device *dev;
2187
2188 local_irq_disable();
2189 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002190 if (!skb) {
2191 __napi_complete(napi);
2192 local_irq_enable();
2193 break;
2194 }
2195
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 local_irq_enable();
2197
2198 dev = skb->dev;
2199
2200 netif_receive_skb(skb);
2201
2202 dev_put(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002203 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002205 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206}
2207
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002208/**
2209 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002210 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002211 *
2212 * The entry's receive function will be scheduled to run
2213 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002214void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002215{
2216 unsigned long flags;
2217
2218 local_irq_save(flags);
2219 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2220 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2221 local_irq_restore(flags);
2222}
2223EXPORT_SYMBOL(__napi_schedule);
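
/*
 * Example: the usual way a driver's RX interrupt hands work to NAPI
 * (a sketch; the foo_* names are hypothetical). napi_schedule_prep()
 * claims NAPI_STATE_SCHED so that __napi_schedule() runs exactly once.
 *
 *	static irqreturn_t foo_rx_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			foo_disable_rx_irq(priv);	// poll() re-enables it later
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */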
2224
2225
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226static void net_rx_action(struct softirq_action *h)
2227{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002228 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 unsigned long start_time = jiffies;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002230 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002231 void *have;
2232
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 local_irq_disable();
2234
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002235 while (!list_empty(list)) {
2236 struct napi_struct *n;
2237 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002239 /* If the softirq window is exhausted then punt.
2240 *
2241 * Note that this is a slight policy change from the
2242 * previous NAPI code, which would allow up to 2
2243 * jiffies to pass before breaking out. The test
2244 * used to be "jiffies - start_time > 1".
2245 */
2246 if (unlikely(budget <= 0 || jiffies != start_time))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247 goto softnet_break;
2248
2249 local_irq_enable();
2250
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002251 /* Even though interrupts have been re-enabled, this
2252 * access is safe because interrupts can only add new
2253 * entries to the tail of this list, and only ->poll()
2254 * calls can remove this head entry from the list.
2255 */
2256 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002258 have = netpoll_poll_lock(n);
2259
2260 weight = n->weight;
2261
David S. Miller0a7606c2007-10-29 21:28:47 -07002262 /* This NAPI_STATE_SCHED test is for avoiding a race
2263 * with netpoll's poll_napi(). Only the entity which
2264 * obtains the lock and sees NAPI_STATE_SCHED set will
2265 * actually make the ->poll() call. Therefore we avoid
 2266 * accidentally calling ->poll() when NAPI is not scheduled.
2267 */
2268 work = 0;
2269 if (test_bit(NAPI_STATE_SCHED, &n->state))
2270 work = n->poll(n, weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002271
2272 WARN_ON_ONCE(work > weight);
2273
2274 budget -= work;
2275
2276 local_irq_disable();
2277
2278 /* Drivers must not modify the NAPI state if they
2279 * consume the entire weight. In such cases this code
2280 * still "owns" the NAPI instance and therefore can
2281 * move the instance around on the list at-will.
2282 */
David S. Millerfed17f32008-01-07 21:00:40 -08002283 if (unlikely(work == weight)) {
2284 if (unlikely(napi_disable_pending(n)))
2285 __napi_complete(n);
2286 else
2287 list_move_tail(&n->poll_list, list);
2288 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002289
2290 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002291 }
2292out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002293 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002294
Chris Leechdb217332006-06-17 21:24:58 -07002295#ifdef CONFIG_NET_DMA
2296 /*
2297 * There may not be any more sk_buffs coming right now, so push
2298 * any pending DMA copies to hardware
2299 */
Dan Williamsd379b012007-07-09 11:56:42 -07002300 if (!cpus_empty(net_dma.channel_mask)) {
2301 int chan_idx;
2302 for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
2303 struct dma_chan *chan = net_dma.channels[chan_idx];
2304 if (chan)
2305 dma_async_memcpy_issue_pending(chan);
2306 }
Chris Leechdb217332006-06-17 21:24:58 -07002307 }
2308#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002309
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 return;
2311
2312softnet_break:
2313 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2314 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2315 goto out;
2316}
2317
2318static gifconf_func_t * gifconf_list [NPROTO];
2319
2320/**
2321 * register_gifconf - register a SIOCGIF handler
2322 * @family: Address family
2323 * @gifconf: Function handler
2324 *
2325 * Register protocol dependent address dumping routines. The handler
2326 * that is passed must not be freed or reused until it has been replaced
2327 * by another handler.
2328 */
2329int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2330{
2331 if (family >= NPROTO)
2332 return -EINVAL;
2333 gifconf_list[family] = gifconf;
2334 return 0;
2335}
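
/*
 * Example: IPv4 registers its SIOCGIFCONF handler this way from
 * net/ipv4/devinet.c (inet_gifconf dumps one ifreq per configured
 * address):
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */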
2336
2337
2338/*
2339 * Map an interface index to its name (SIOCGIFNAME)
2340 */
2341
2342/*
2343 * We need this ioctl for efficient implementation of the
2344 * if_indextoname() function required by the IPv6 API. Without
2345 * it, we would have to search all the interfaces to find a
2346 * match. --pb
2347 */
2348
Eric W. Biederman881d9662007-09-17 11:56:21 -07002349static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350{
2351 struct net_device *dev;
2352 struct ifreq ifr;
2353
2354 /*
2355 * Fetch the caller's info block.
2356 */
2357
2358 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2359 return -EFAULT;
2360
2361 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002362 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363 if (!dev) {
2364 read_unlock(&dev_base_lock);
2365 return -ENODEV;
2366 }
2367
2368 strcpy(ifr.ifr_name, dev->name);
2369 read_unlock(&dev_base_lock);
2370
2371 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2372 return -EFAULT;
2373 return 0;
2374}
2375
2376/*
2377 * Perform a SIOCGIFCONF call. This structure will change
2378 * size eventually, and there is nothing I can do about it.
2379 * Thus we will need a 'compatibility mode'.
2380 */
2381
Eric W. Biederman881d9662007-09-17 11:56:21 -07002382static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383{
2384 struct ifconf ifc;
2385 struct net_device *dev;
2386 char __user *pos;
2387 int len;
2388 int total;
2389 int i;
2390
2391 /*
2392 * Fetch the caller's info block.
2393 */
2394
2395 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2396 return -EFAULT;
2397
2398 pos = ifc.ifc_buf;
2399 len = ifc.ifc_len;
2400
2401 /*
2402 * Loop over the interfaces, and write an info block for each.
2403 */
2404
2405 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002406 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 for (i = 0; i < NPROTO; i++) {
2408 if (gifconf_list[i]) {
2409 int done;
2410 if (!pos)
2411 done = gifconf_list[i](dev, NULL, 0);
2412 else
2413 done = gifconf_list[i](dev, pos + total,
2414 len - total);
2415 if (done < 0)
2416 return -EFAULT;
2417 total += done;
2418 }
2419 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002420 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421
2422 /*
2423 * All done. Write the updated control block back to the caller.
2424 */
2425 ifc.ifc_len = total;
2426
2427 /*
2428 * Both BSD and Solaris return 0 here, so we do too.
2429 */
2430 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2431}
2432
2433#ifdef CONFIG_PROC_FS
2434/*
2435 * This is invoked by the /proc filesystem handler to display a device
2436 * in detail.
2437 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002439 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440{
Denis V. Luneve372c412007-11-19 22:31:54 -08002441 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002442 loff_t off;
2443 struct net_device *dev;
2444
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002446 if (!*pos)
2447 return SEQ_START_TOKEN;
2448
2449 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002450 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002451 if (off++ == *pos)
2452 return dev;
2453
2454 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455}
2456
2457void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2458{
Denis V. Luneve372c412007-11-19 22:31:54 -08002459 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002461 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002462 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463}
2464
2465void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002466 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467{
2468 read_unlock(&dev_base_lock);
2469}
2470
2471static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2472{
Rusty Russellc45d2862007-03-28 14:29:08 -07002473 struct net_device_stats *stats = dev->get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474
Rusty Russell5a1b5892007-04-28 21:04:03 -07002475 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2476 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2477 dev->name, stats->rx_bytes, stats->rx_packets,
2478 stats->rx_errors,
2479 stats->rx_dropped + stats->rx_missed_errors,
2480 stats->rx_fifo_errors,
2481 stats->rx_length_errors + stats->rx_over_errors +
2482 stats->rx_crc_errors + stats->rx_frame_errors,
2483 stats->rx_compressed, stats->multicast,
2484 stats->tx_bytes, stats->tx_packets,
2485 stats->tx_errors, stats->tx_dropped,
2486 stats->tx_fifo_errors, stats->collisions,
2487 stats->tx_carrier_errors +
2488 stats->tx_aborted_errors +
2489 stats->tx_window_errors +
2490 stats->tx_heartbeat_errors,
2491 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492}
2493
2494/*
2495 * Called from the PROCfs module. This now uses the new arbitrary sized
2496 * /proc/net interface to create /proc/net/dev
2497 */
2498static int dev_seq_show(struct seq_file *seq, void *v)
2499{
2500 if (v == SEQ_START_TOKEN)
2501 seq_puts(seq, "Inter-| Receive "
2502 " | Transmit\n"
2503 " face |bytes packets errs drop fifo frame "
2504 "compressed multicast|bytes packets errs "
2505 "drop fifo colls carrier compressed\n");
2506 else
2507 dev_seq_printf_stats(seq, v);
2508 return 0;
2509}
2510
2511static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2512{
2513 struct netif_rx_stats *rc = NULL;
2514
Mike Travis0c0b0ac2008-05-02 16:43:08 -07002515 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002516 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 rc = &per_cpu(netdev_rx_stat, *pos);
2518 break;
2519 } else
2520 ++*pos;
2521 return rc;
2522}
2523
2524static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2525{
2526 return softnet_get_online(pos);
2527}
2528
2529static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2530{
2531 ++*pos;
2532 return softnet_get_online(pos);
2533}
2534
2535static void softnet_seq_stop(struct seq_file *seq, void *v)
2536{
2537}
2538
2539static int softnet_seq_show(struct seq_file *seq, void *v)
2540{
2541 struct netif_rx_stats *s = v;
2542
2543 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07002544 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07002545 0, 0, 0, 0, /* was fastroute */
2546 s->cpu_collision );
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 return 0;
2548}
2549
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002550static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551 .start = dev_seq_start,
2552 .next = dev_seq_next,
2553 .stop = dev_seq_stop,
2554 .show = dev_seq_show,
2555};
2556
2557static int dev_seq_open(struct inode *inode, struct file *file)
2558{
Denis V. Luneve372c412007-11-19 22:31:54 -08002559 return seq_open_net(inode, file, &dev_seq_ops,
2560 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561}
2562
Arjan van de Ven9a321442007-02-12 00:55:35 -08002563static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 .owner = THIS_MODULE,
2565 .open = dev_seq_open,
2566 .read = seq_read,
2567 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08002568 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569};
2570
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002571static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572 .start = softnet_seq_start,
2573 .next = softnet_seq_next,
2574 .stop = softnet_seq_stop,
2575 .show = softnet_seq_show,
2576};
2577
2578static int softnet_seq_open(struct inode *inode, struct file *file)
2579{
2580 return seq_open(file, &softnet_seq_ops);
2581}
2582
Arjan van de Ven9a321442007-02-12 00:55:35 -08002583static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 .owner = THIS_MODULE,
2585 .open = softnet_seq_open,
2586 .read = seq_read,
2587 .llseek = seq_lseek,
2588 .release = seq_release,
2589};
2590
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002591static void *ptype_get_idx(loff_t pos)
2592{
2593 struct packet_type *pt = NULL;
2594 loff_t i = 0;
2595 int t;
2596
2597 list_for_each_entry_rcu(pt, &ptype_all, list) {
2598 if (i == pos)
2599 return pt;
2600 ++i;
2601 }
2602
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002603 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002604 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2605 if (i == pos)
2606 return pt;
2607 ++i;
2608 }
2609 }
2610 return NULL;
2611}
2612
2613static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002614 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002615{
2616 rcu_read_lock();
2617 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2618}
2619
2620static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2621{
2622 struct packet_type *pt;
2623 struct list_head *nxt;
2624 int hash;
2625
2626 ++*pos;
2627 if (v == SEQ_START_TOKEN)
2628 return ptype_get_idx(0);
2629
2630 pt = v;
2631 nxt = pt->list.next;
2632 if (pt->type == htons(ETH_P_ALL)) {
2633 if (nxt != &ptype_all)
2634 goto found;
2635 hash = 0;
2636 nxt = ptype_base[0].next;
2637 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002638 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002639
2640 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002641 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002642 return NULL;
2643 nxt = ptype_base[hash].next;
2644 }
2645found:
2646 return list_entry(nxt, struct packet_type, list);
2647}
2648
2649static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002650 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002651{
2652 rcu_read_unlock();
2653}
2654
2655static void ptype_seq_decode(struct seq_file *seq, void *sym)
2656{
2657#ifdef CONFIG_KALLSYMS
2658 unsigned long offset = 0, symsize;
2659 const char *symname;
2660 char *modname;
2661 char namebuf[128];
2662
2663 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2664 &modname, namebuf);
2665
2666 if (symname) {
2667 char *delim = ":";
2668
2669 if (!modname)
2670 modname = delim = "";
2671 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2672 symname, offset);
2673 return;
2674 }
2675#endif
2676
2677 seq_printf(seq, "[%p]", sym);
2678}
2679
2680static int ptype_seq_show(struct seq_file *seq, void *v)
2681{
2682 struct packet_type *pt = v;
2683
2684 if (v == SEQ_START_TOKEN)
2685 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002686 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002687 if (pt->type == htons(ETH_P_ALL))
2688 seq_puts(seq, "ALL ");
2689 else
2690 seq_printf(seq, "%04x", ntohs(pt->type));
2691
2692 seq_printf(seq, " %-8s ",
2693 pt->dev ? pt->dev->name : "");
2694 ptype_seq_decode(seq, pt->func);
2695 seq_putc(seq, '\n');
2696 }
2697
2698 return 0;
2699}
2700
2701static const struct seq_operations ptype_seq_ops = {
2702 .start = ptype_seq_start,
2703 .next = ptype_seq_next,
2704 .stop = ptype_seq_stop,
2705 .show = ptype_seq_show,
2706};
2707
2708static int ptype_seq_open(struct inode *inode, struct file *file)
2709{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002710 return seq_open_net(inode, file, &ptype_seq_ops,
2711 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002712}
2713
2714static const struct file_operations ptype_seq_fops = {
2715 .owner = THIS_MODULE,
2716 .open = ptype_seq_open,
2717 .read = seq_read,
2718 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002719 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002720};
2721
2722
Pavel Emelyanov46650792007-10-08 20:38:39 -07002723static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724{
2725 int rc = -ENOMEM;
2726
Eric W. Biederman881d9662007-09-17 11:56:21 -07002727 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002729 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002731 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002732 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002733
Eric W. Biederman881d9662007-09-17 11:56:21 -07002734 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002735 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 rc = 0;
2737out:
2738 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002739out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002740 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002742 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002744 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 goto out;
2746}
Eric W. Biederman881d9662007-09-17 11:56:21 -07002747
Pavel Emelyanov46650792007-10-08 20:38:39 -07002748static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07002749{
2750 wext_proc_exit(net);
2751
2752 proc_net_remove(net, "ptype");
2753 proc_net_remove(net, "softnet_stat");
2754 proc_net_remove(net, "dev");
2755}
2756
Denis V. Lunev022cbae2007-11-13 03:23:50 -08002757static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07002758 .init = dev_proc_net_init,
2759 .exit = dev_proc_net_exit,
2760};
2761
2762static int __init dev_proc_init(void)
2763{
2764 return register_pernet_subsys(&dev_proc_ops);
2765}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766#else
2767#define dev_proc_init() 0
2768#endif /* CONFIG_PROC_FS */
2769
2770
2771/**
2772 * netdev_set_master - set up master/slave pair
2773 * @slave: slave device
2774 * @master: new master device
2775 *
2776 * Changes the master device of the slave. Pass %NULL to break the
2777 * bonding. The caller must hold the RTNL semaphore. On a failure
2778 * a negative errno code is returned. On success the reference counts
2779 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2780 * function returns zero.
2781 */
2782int netdev_set_master(struct net_device *slave, struct net_device *master)
2783{
2784 struct net_device *old = slave->master;
2785
2786 ASSERT_RTNL();
2787
2788 if (master) {
2789 if (old)
2790 return -EBUSY;
2791 dev_hold(master);
2792 }
2793
2794 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002795
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 synchronize_net();
2797
2798 if (old)
2799 dev_put(old);
2800
2801 if (master)
2802 slave->flags |= IFF_SLAVE;
2803 else
2804 slave->flags &= ~IFF_SLAVE;
2805
2806 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2807 return 0;
2808}
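
/*
 * Example: a sketch of the bonding-style enslave/release pairing.
 * The caller must hold RTNL, as noted above.
 *
 *	ASSERT_RTNL();
 *	err = netdev_set_master(slave_dev, bond_dev);	// enslave
 *	if (err)
 *		goto err_out;
 *	...
 *	netdev_set_master(slave_dev, NULL);		// later: break the pairing
 */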
2809
Wang Chendad9b332008-06-18 01:48:28 -07002810static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07002811{
2812 unsigned short old_flags = dev->flags;
2813
Patrick McHardy24023452007-07-14 18:51:31 -07002814 ASSERT_RTNL();
2815
Wang Chendad9b332008-06-18 01:48:28 -07002816 dev->flags |= IFF_PROMISC;
2817 dev->promiscuity += inc;
2818 if (dev->promiscuity == 0) {
2819 /*
2820 * Avoid overflow.
 2821 * If inc causes overflow, leave promiscuity untouched and return an error.
2822 */
2823 if (inc < 0)
2824 dev->flags &= ~IFF_PROMISC;
2825 else {
2826 dev->promiscuity -= inc;
2827 printk(KERN_WARNING "%s: promiscuity touches roof, "
2828 "set promiscuity failed, promiscuity feature "
2829 "of device might be broken.\n", dev->name);
2830 return -EOVERFLOW;
2831 }
2832 }
Patrick McHardy4417da62007-06-27 01:28:10 -07002833 if (dev->flags != old_flags) {
2834 printk(KERN_INFO "device %s %s promiscuous mode\n",
2835 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2836 "left");
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05002837 if (audit_enabled)
2838 audit_log(current->audit_context, GFP_ATOMIC,
2839 AUDIT_ANOM_PROMISCUOUS,
2840 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2841 dev->name, (dev->flags & IFF_PROMISC),
2842 (old_flags & IFF_PROMISC),
2843 audit_get_loginuid(current),
2844 current->uid, current->gid,
2845 audit_get_sessionid(current));
Patrick McHardy24023452007-07-14 18:51:31 -07002846
2847 if (dev->change_rx_flags)
2848 dev->change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07002849 }
Wang Chendad9b332008-06-18 01:48:28 -07002850 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07002851}
2852
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853/**
2854 * dev_set_promiscuity - update promiscuity count on a device
2855 * @dev: device
2856 * @inc: modifier
2857 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07002858 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859 * remains above zero the interface remains promiscuous. Once it hits zero
2860 * the device reverts to normal filtering operation. A negative @inc
2861 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07002862 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 */
Wang Chendad9b332008-06-18 01:48:28 -07002864int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865{
2866 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07002867 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868
Wang Chendad9b332008-06-18 01:48:28 -07002869 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07002870 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07002871 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07002872 if (dev->flags != old_flags)
2873 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07002874 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875}
2876
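/*
 * Illustrative sketch, not part of the original file: a capture-style
 * user of dev_set_promiscuity().  Every +1 must eventually be paired
 * with a -1 so the reference-count semantics hold; the example_*
 * names are hypothetical, and rtnl_lock() satisfies the ASSERT_RTNL()
 * in __dev_set_promiscuity().
 */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* may fail with -EOVERFLOW */
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* balance the earlier +1 */
	rtnl_unlock();
}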
2877/**
2878 * dev_set_allmulti - update allmulti count on a device
2879 * @dev: device
2880 * @inc: modifier
2881 *
2882 * Add or remove reception of all multicast frames on a device. While the
2883 * count in the device remains above zero the interface keeps listening
2884 * to all multicast frames. Once it hits zero the device reverts to normal
2885 * filtering operation. A negative @inc value is used to drop the counter
2886 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07002887 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 */
2889
Wang Chendad9b332008-06-18 01:48:28 -07002890int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891{
2892 unsigned short old_flags = dev->flags;
2893
Patrick McHardy24023452007-07-14 18:51:31 -07002894 ASSERT_RTNL();
2895
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07002897 dev->allmulti += inc;
2898 if (dev->allmulti == 0) {
2899 /*
2900 * Avoid overflow.
2901 * If inc causes overflow, untouch allmulti and return error.
2902 */
2903 if (inc < 0)
2904 dev->flags &= ~IFF_ALLMULTI;
2905 else {
2906 dev->allmulti -= inc;
2907 printk(KERN_WARNING "%s: allmulti counter overflowed, "
2908 "allmulti mode could not be set; the feature "
2909 "may be unreliable on this device.\n", dev->name);
2910 return -EOVERFLOW;
2911 }
2912 }
Patrick McHardy24023452007-07-14 18:51:31 -07002913 if (dev->flags ^ old_flags) {
2914 if (dev->change_rx_flags)
2915 dev->change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07002916 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07002917 }
Wang Chendad9b332008-06-18 01:48:28 -07002918 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07002919}
2920
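/*
 * Illustrative sketch, not part of the original file: a multicast
 * routing user might hold an allmulti reference for as long as
 * forwarding is enabled.  The example_* names are assumptions; as
 * with promiscuity, the count must be balanced on the way down.
 */
static int example_mroute_enable(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_allmulti(dev, 1);
	rtnl_unlock();
	return err;
}

static void example_mroute_disable(struct net_device *dev)
{
	rtnl_lock();
	dev_set_allmulti(dev, -1);
	rtnl_unlock();
}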
2921/*
2922 * Upload unicast and multicast address lists to device and
2923 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08002924 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07002925 * are present.
2926 */
2927void __dev_set_rx_mode(struct net_device *dev)
2928{
2929 /* dev_open will call this function so the list will stay sane. */
2930 if (!(dev->flags&IFF_UP))
2931 return;
2932
2933 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09002934 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07002935
2936 if (dev->set_rx_mode)
2937 dev->set_rx_mode(dev);
2938 else {
2939 /* Unicast address changes may only happen under the rtnl,
2940 * therefore calling __dev_set_promiscuity here is safe.
2941 */
2942 if (dev->uc_count > 0 && !dev->uc_promisc) {
2943 __dev_set_promiscuity(dev, 1);
2944 dev->uc_promisc = 1;
2945 } else if (dev->uc_count == 0 && dev->uc_promisc) {
2946 __dev_set_promiscuity(dev, -1);
2947 dev->uc_promisc = 0;
2948 }
2949
2950 if (dev->set_multicast_list)
2951 dev->set_multicast_list(dev);
2952 }
2953}
2954
2955void dev_set_rx_mode(struct net_device *dev)
2956{
2957 netif_tx_lock_bh(dev);
2958 __dev_set_rx_mode(dev);
2959 netif_tx_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960}
2961
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002962int __dev_addr_delete(struct dev_addr_list **list, int *count,
2963 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07002964{
2965 struct dev_addr_list *da;
2966
2967 for (; (da = *list) != NULL; list = &da->next) {
2968 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2969 alen == da->da_addrlen) {
2970 if (glbl) {
2971 int old_glbl = da->da_gusers;
2972 da->da_gusers = 0;
2973 if (old_glbl == 0)
2974 break;
2975 }
2976 if (--da->da_users)
2977 return 0;
2978
2979 *list = da->next;
2980 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002981 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07002982 return 0;
2983 }
2984 }
2985 return -ENOENT;
2986}
2987
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002988int __dev_addr_add(struct dev_addr_list **list, int *count,
2989 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07002990{
2991 struct dev_addr_list *da;
2992
2993 for (da = *list; da != NULL; da = da->next) {
2994 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2995 da->da_addrlen == alen) {
2996 if (glbl) {
2997 int old_glbl = da->da_gusers;
2998 da->da_gusers = 1;
2999 if (old_glbl)
3000 return 0;
3001 }
3002 da->da_users++;
3003 return 0;
3004 }
3005 }
3006
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003007 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003008 if (da == NULL)
3009 return -ENOMEM;
3010 memcpy(da->da_addr, addr, alen);
3011 da->da_addrlen = alen;
3012 da->da_users = 1;
3013 da->da_gusers = glbl ? 1 : 0;
3014 da->next = *list;
3015 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003016 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003017 return 0;
3018}
3019
Patrick McHardy4417da62007-06-27 01:28:10 -07003020/**
3021 * dev_unicast_delete - Release secondary unicast address.
3022 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003023 * @addr: address to delete
3024 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003025 *
3026 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003027 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003028 *
3029 * The caller must hold the rtnl_mutex.
3030 */
3031int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3032{
3033 int err;
3034
3035 ASSERT_RTNL();
3036
3037 netif_tx_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003038 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3039 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003040 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003041 netif_tx_unlock_bh(dev);
3042 return err;
3043}
3044EXPORT_SYMBOL(dev_unicast_delete);
3045
3046/**
3047 * dev_unicast_add - add a secondary unicast address
3048 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003049 * @addr: address to add
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003050 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003051 *
3052 * Add a secondary unicast address to the device or increase
3053 * the reference count if it already exists.
3054 *
3055 * The caller must hold the rtnl_mutex.
3056 */
3057int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3058{
3059 int err;
3060
3061 ASSERT_RTNL();
3062
3063 netif_tx_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003064 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3065 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003066 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003067 netif_tx_unlock_bh(dev);
3068 return err;
3069}
3070EXPORT_SYMBOL(dev_unicast_add);
3071
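/*
 * Illustrative sketch, not part of the original file: installing and
 * removing a secondary unicast MAC, as an FCoE- or macvlan-style user
 * might.  The address bytes are made up, ETH_ALEN is assumed from
 * <linux/if_ether.h>, and rtnl_lock() satisfies the ASSERT_RTNL() in
 * dev_unicast_add()/dev_unicast_delete().
 */
static const u8 example_mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };

static int example_secondary_mac_add(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_unicast_add(dev, (void *)example_mac, ETH_ALEN);
	rtnl_unlock();
	return err;
}

static void example_secondary_mac_del(struct net_device *dev)
{
	rtnl_lock();
	dev_unicast_delete(dev, (void *)example_mac, ETH_ALEN);
	rtnl_unlock();
}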
Chris Leeche83a2ea2008-01-31 16:53:23 -08003072int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3073 struct dev_addr_list **from, int *from_count)
3074{
3075 struct dev_addr_list *da, *next;
3076 int err = 0;
3077
3078 da = *from;
3079 while (da != NULL) {
3080 next = da->next;
3081 if (!da->da_synced) {
3082 err = __dev_addr_add(to, to_count,
3083 da->da_addr, da->da_addrlen, 0);
3084 if (err < 0)
3085 break;
3086 da->da_synced = 1;
3087 da->da_users++;
3088 } else if (da->da_users == 1) {
3089 __dev_addr_delete(to, to_count,
3090 da->da_addr, da->da_addrlen, 0);
3091 __dev_addr_delete(from, from_count,
3092 da->da_addr, da->da_addrlen, 0);
3093 }
3094 da = next;
3095 }
3096 return err;
3097}
3098
3099void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3100 struct dev_addr_list **from, int *from_count)
3101{
3102 struct dev_addr_list *da, *next;
3103
3104 da = *from;
3105 while (da != NULL) {
3106 next = da->next;
3107 if (da->da_synced) {
3108 __dev_addr_delete(to, to_count,
3109 da->da_addr, da->da_addrlen, 0);
3110 da->da_synced = 0;
3111 __dev_addr_delete(from, from_count,
3112 da->da_addr, da->da_addrlen, 0);
3113 }
3114 da = next;
3115 }
3116}
3117
3118/**
3119 * dev_unicast_sync - Synchronize device's unicast list to another device
3120 * @to: destination device
3121 * @from: source device
3122 *
3123 * Add newly added addresses to the destination device and release
3124 * addresses that have no users left. The source device must be
3125 * locked by netif_tx_lock_bh.
3126 *
3127 * This function is intended to be called from the dev->set_rx_mode
3128 * function of layered software devices.
3129 */
3130int dev_unicast_sync(struct net_device *to, struct net_device *from)
3131{
3132 int err = 0;
3133
3134 netif_tx_lock_bh(to);
3135 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3136 &from->uc_list, &from->uc_count);
3137 if (!err)
3138 __dev_set_rx_mode(to);
3139 netif_tx_unlock_bh(to);
3140 return err;
3141}
3142EXPORT_SYMBOL(dev_unicast_sync);
3143
3144/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003145 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003146 * @to: destination device
3147 * @from: source device
3148 *
3149 * Remove all addresses that were added to the destination device by
3150 * dev_unicast_sync(). This function is intended to be called from the
3151 * dev->stop function of layered software devices.
3152 */
3153void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3154{
3155 netif_tx_lock_bh(from);
3156 netif_tx_lock_bh(to);
3157
3158 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3159 &from->uc_list, &from->uc_count);
3160 __dev_set_rx_mode(to);
3161
3162 netif_tx_unlock_bh(to);
3163 netif_tx_unlock_bh(from);
3164}
3165EXPORT_SYMBOL(dev_unicast_unsync);
3166
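/*
 * Illustrative sketch, not part of the original file: the intended
 * calling pattern for dev_unicast_sync()/dev_unicast_unsync() from a
 * layered device such as a VLAN.  struct example_priv, the example_*
 * names and the way the lower device is recovered are assumptions.
 */
struct example_priv {
	struct net_device *lower_dev;
};

/* Would be wired up as the layered device's dev->set_rx_mode hook,
 * which runs with the device tx-locked as dev_unicast_sync() expects. */
static void example_layered_set_rx_mode(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	dev_unicast_sync(priv->lower_dev, dev);
}

/* Would be wired up as the layered device's dev->stop hook. */
static int example_layered_stop(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	dev_unicast_unsync(priv->lower_dev, dev);
	return 0;
}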
Denis Cheng12972622007-07-18 02:12:56 -07003167static void __dev_addr_discard(struct dev_addr_list **list)
3168{
3169 struct dev_addr_list *tmp;
3170
3171 while (*list != NULL) {
3172 tmp = *list;
3173 *list = tmp->next;
3174 if (tmp->da_users > tmp->da_gusers)
3175 printk("__dev_addr_discard: address leakage! "
3176 "da_users=%d\n", tmp->da_users);
3177 kfree(tmp);
3178 }
3179}
3180
Denis Cheng26cc2522007-07-18 02:12:03 -07003181static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07003182{
3183 netif_tx_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07003184
Patrick McHardy4417da62007-06-27 01:28:10 -07003185 __dev_addr_discard(&dev->uc_list);
3186 dev->uc_count = 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003187
Denis Cheng456ad752007-07-18 02:10:54 -07003188 __dev_addr_discard(&dev->mc_list);
3189 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07003190
Denis Cheng456ad752007-07-18 02:10:54 -07003191 netif_tx_unlock_bh(dev);
3192}
3193
Linus Torvalds1da177e2005-04-16 15:20:36 -07003194unsigned dev_get_flags(const struct net_device *dev)
3195{
3196 unsigned flags;
3197
3198 flags = (dev->flags & ~(IFF_PROMISC |
3199 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08003200 IFF_RUNNING |
3201 IFF_LOWER_UP |
3202 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 (dev->gflags & (IFF_PROMISC |
3204 IFF_ALLMULTI));
3205
Stefan Rompfb00055a2006-03-20 17:09:11 -08003206 if (netif_running(dev)) {
3207 if (netif_oper_up(dev))
3208 flags |= IFF_RUNNING;
3209 if (netif_carrier_ok(dev))
3210 flags |= IFF_LOWER_UP;
3211 if (netif_dormant(dev))
3212 flags |= IFF_DORMANT;
3213 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214
3215 return flags;
3216}
3217
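/*
 * Illustrative sketch, not part of the original file: because
 * dev_get_flags() folds IFF_RUNNING/IFF_LOWER_UP/IFF_DORMANT into the
 * returned word, link state can be tested without poking dev->state
 * bits directly.  example_link_usable() is a hypothetical helper.
 */
static int example_link_usable(const struct net_device *dev)
{
	unsigned flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}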
3218int dev_change_flags(struct net_device *dev, unsigned flags)
3219{
Thomas Graf7c355f52007-06-05 16:03:03 -07003220 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221 int old_flags = dev->flags;
3222
Patrick McHardy24023452007-07-14 18:51:31 -07003223 ASSERT_RTNL();
3224
Linus Torvalds1da177e2005-04-16 15:20:36 -07003225 /*
3226 * Set the flags on our device.
3227 */
3228
3229 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3230 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3231 IFF_AUTOMEDIA)) |
3232 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3233 IFF_ALLMULTI));
3234
3235 /*
3236 * Load in the correct multicast list now the flags have changed.
3237 */
3238
David Woodhouse0e917962008-05-20 14:36:14 -07003239 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
Patrick McHardy24023452007-07-14 18:51:31 -07003240 dev->change_rx_flags(dev, IFF_MULTICAST);
3241
Patrick McHardy4417da62007-06-27 01:28:10 -07003242 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243
3244 /*
3245 * Have we downed the interface? We handle IFF_UP ourselves
3246 * according to user attempts to set it, rather than blindly
3247 * setting it.
3248 */
3249
3250 ret = 0;
3251 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3252 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3253
3254 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07003255 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256 }
3257
3258 if (dev->flags & IFF_UP &&
3259 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3260 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003261 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262
3263 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3264 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3265 dev->gflags ^= IFF_PROMISC;
3266 dev_set_promiscuity(dev, inc);
3267 }
3268
3269 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3270 is important. Some (broken) drivers set IFF_PROMISC when
3271 IFF_ALLMULTI is requested, without asking us and without reporting.
3272 */
3273 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3274 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3275 dev->gflags ^= IFF_ALLMULTI;
3276 dev_set_allmulti(dev, inc);
3277 }
3278
Thomas Graf7c355f52007-06-05 16:03:03 -07003279 /* Exclude state transition flags, already notified */
3280 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3281 if (changes)
3282 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283
3284 return ret;
3285}
3286
3287int dev_set_mtu(struct net_device *dev, int new_mtu)
3288{
3289 int err;
3290
3291 if (new_mtu == dev->mtu)
3292 return 0;
3293
3294 /* MTU must be positive. */
3295 if (new_mtu < 0)
3296 return -EINVAL;
3297
3298 if (!netif_device_present(dev))
3299 return -ENODEV;
3300
3301 err = 0;
3302 if (dev->change_mtu)
3303 err = dev->change_mtu(dev, new_mtu);
3304 else
3305 dev->mtu = new_mtu;
3306 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003307 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308 return err;
3309}
3310
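/*
 * Illustrative sketch, not part of the original file: a tunnel-style
 * user shrinking a device MTU by its encapsulation overhead.  The
 * name and the overhead parameter are assumptions; RTNL is taken
 * because dev->change_mtu implementations and the notifier chain
 * conventionally run under it.
 */
static int example_shrink_mtu(struct net_device *dev, int overhead)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, dev->mtu - overhead);
	rtnl_unlock();
	return err;
}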
3311int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3312{
3313 int err;
3314
3315 if (!dev->set_mac_address)
3316 return -EOPNOTSUPP;
3317 if (sa->sa_family != dev->type)
3318 return -EINVAL;
3319 if (!netif_device_present(dev))
3320 return -ENODEV;
3321 err = dev->set_mac_address(dev, sa);
3322 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003323 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003324 return err;
3325}
3326
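/*
 * Illustrative sketch, not part of the original file: building the
 * struct sockaddr that dev_set_mac_address() expects.  sa_family must
 * match dev->type (e.g. ARPHRD_ETHER) or -EINVAL is returned above;
 * example_set_mac() is a hypothetical helper.
 */
static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, dev->addr_len);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}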
3327/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07003328 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003329 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07003330static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331{
3332 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003333 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334
3335 if (!dev)
3336 return -ENODEV;
3337
3338 switch (cmd) {
3339 case SIOCGIFFLAGS: /* Get interface flags */
3340 ifr->ifr_flags = dev_get_flags(dev);
3341 return 0;
3342
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343 case SIOCGIFMETRIC: /* Get the metric on the interface
3344 (currently unused) */
3345 ifr->ifr_metric = 0;
3346 return 0;
3347
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348 case SIOCGIFMTU: /* Get the MTU of a device */
3349 ifr->ifr_mtu = dev->mtu;
3350 return 0;
3351
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352 case SIOCGIFHWADDR:
3353 if (!dev->addr_len)
3354 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3355 else
3356 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3357 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3358 ifr->ifr_hwaddr.sa_family = dev->type;
3359 return 0;
3360
Jeff Garzik14e3e072007-10-08 00:06:32 -07003361 case SIOCGIFSLAVE:
3362 err = -EINVAL;
3363 break;
3364
3365 case SIOCGIFMAP:
3366 ifr->ifr_map.mem_start = dev->mem_start;
3367 ifr->ifr_map.mem_end = dev->mem_end;
3368 ifr->ifr_map.base_addr = dev->base_addr;
3369 ifr->ifr_map.irq = dev->irq;
3370 ifr->ifr_map.dma = dev->dma;
3371 ifr->ifr_map.port = dev->if_port;
3372 return 0;
3373
3374 case SIOCGIFINDEX:
3375 ifr->ifr_ifindex = dev->ifindex;
3376 return 0;
3377
3378 case SIOCGIFTXQLEN:
3379 ifr->ifr_qlen = dev->tx_queue_len;
3380 return 0;
3381
3382 default:
3383 /* dev_ioctl() should ensure this case
3384 * is never reached
3385 */
3386 WARN_ON(1);
3387 err = -EINVAL;
3388 break;
3389
3390 }
3391 return err;
3392}
3393
3394/*
3395 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3396 */
3397static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3398{
3399 int err;
3400 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3401
3402 if (!dev)
3403 return -ENODEV;
3404
3405 switch (cmd) {
3406 case SIOCSIFFLAGS: /* Set interface flags */
3407 return dev_change_flags(dev, ifr->ifr_flags);
3408
3409 case SIOCSIFMETRIC: /* Set the metric on the interface
3410 (currently unused) */
3411 return -EOPNOTSUPP;
3412
3413 case SIOCSIFMTU: /* Set the MTU of a device */
3414 return dev_set_mtu(dev, ifr->ifr_mtu);
3415
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 case SIOCSIFHWADDR:
3417 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3418
3419 case SIOCSIFHWBROADCAST:
3420 if (ifr->ifr_hwaddr.sa_family != dev->type)
3421 return -EINVAL;
3422 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3423 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003424 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003425 return 0;
3426
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427 case SIOCSIFMAP:
3428 if (dev->set_config) {
3429 if (!netif_device_present(dev))
3430 return -ENODEV;
3431 return dev->set_config(dev, &ifr->ifr_map);
3432 }
3433 return -EOPNOTSUPP;
3434
3435 case SIOCADDMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003436 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3438 return -EINVAL;
3439 if (!netif_device_present(dev))
3440 return -ENODEV;
3441 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3442 dev->addr_len, 1);
3443
3444 case SIOCDELMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003445 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3447 return -EINVAL;
3448 if (!netif_device_present(dev))
3449 return -ENODEV;
3450 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3451 dev->addr_len, 1);
3452
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 case SIOCSIFTXQLEN:
3454 if (ifr->ifr_qlen < 0)
3455 return -EINVAL;
3456 dev->tx_queue_len = ifr->ifr_qlen;
3457 return 0;
3458
3459 case SIOCSIFNAME:
3460 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3461 return dev_change_name(dev, ifr->ifr_newname);
3462
3463 /*
3464 * Unknown or private ioctl
3465 */
3466
3467 default:
3468 if ((cmd >= SIOCDEVPRIVATE &&
3469 cmd <= SIOCDEVPRIVATE + 15) ||
3470 cmd == SIOCBONDENSLAVE ||
3471 cmd == SIOCBONDRELEASE ||
3472 cmd == SIOCBONDSETHWADDR ||
3473 cmd == SIOCBONDSLAVEINFOQUERY ||
3474 cmd == SIOCBONDINFOQUERY ||
3475 cmd == SIOCBONDCHANGEACTIVE ||
3476 cmd == SIOCGMIIPHY ||
3477 cmd == SIOCGMIIREG ||
3478 cmd == SIOCSMIIREG ||
3479 cmd == SIOCBRADDIF ||
3480 cmd == SIOCBRDELIF ||
3481 cmd == SIOCWANDEV) {
3482 err = -EOPNOTSUPP;
3483 if (dev->do_ioctl) {
3484 if (netif_device_present(dev))
3485 err = dev->do_ioctl(dev, ifr,
3486 cmd);
3487 else
3488 err = -ENODEV;
3489 }
3490 } else
3491 err = -EINVAL;
3492
3493 }
3494 return err;
3495}
3496
3497/*
3498 * This function handles all "interface"-type I/O control requests. The actual
3499 * 'doing' part of this is dev_ifsioc above.
3500 */
3501
3502/**
3503 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003504 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505 * @cmd: command to issue
3506 * @arg: pointer to a struct ifreq in user space
3507 *
3508 * Issue ioctl functions to devices. This is normally called by the
3509 * user space syscall interfaces but can sometimes be useful for
3510 * other purposes. The return value is the return from the syscall if
3511 * positive or a negative errno code on error.
3512 */
3513
Eric W. Biederman881d9662007-09-17 11:56:21 -07003514int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003515{
3516 struct ifreq ifr;
3517 int ret;
3518 char *colon;
3519
3520 /* One special case: SIOCGIFCONF takes ifconf argument
3521 and requires shared lock, because it sleeps writing
3522 to user space.
3523 */
3524
3525 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003526 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003527 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003528 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529 return ret;
3530 }
3531 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003532 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003533
3534 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3535 return -EFAULT;
3536
3537 ifr.ifr_name[IFNAMSIZ-1] = 0;
3538
3539 colon = strchr(ifr.ifr_name, ':');
3540 if (colon)
3541 *colon = 0;
3542
3543 /*
3544 * See which interface the caller is talking about.
3545 */
3546
3547 switch (cmd) {
3548 /*
3549 * These ioctl calls:
3550 * - can be done by all.
3551 * - atomic and do not require locking.
3552 * - return a value
3553 */
3554 case SIOCGIFFLAGS:
3555 case SIOCGIFMETRIC:
3556 case SIOCGIFMTU:
3557 case SIOCGIFHWADDR:
3558 case SIOCGIFSLAVE:
3559 case SIOCGIFMAP:
3560 case SIOCGIFINDEX:
3561 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003562 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563 read_lock(&dev_base_lock);
Jeff Garzik14e3e072007-10-08 00:06:32 -07003564 ret = dev_ifsioc_locked(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003565 read_unlock(&dev_base_lock);
3566 if (!ret) {
3567 if (colon)
3568 *colon = ':';
3569 if (copy_to_user(arg, &ifr,
3570 sizeof(struct ifreq)))
3571 ret = -EFAULT;
3572 }
3573 return ret;
3574
3575 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003576 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003578 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003579 rtnl_unlock();
3580 if (!ret) {
3581 if (colon)
3582 *colon = ':';
3583 if (copy_to_user(arg, &ifr,
3584 sizeof(struct ifreq)))
3585 ret = -EFAULT;
3586 }
3587 return ret;
3588
3589 /*
3590 * These ioctl calls:
3591 * - require superuser power.
3592 * - require strict serialization.
3593 * - return a value
3594 */
3595 case SIOCGMIIPHY:
3596 case SIOCGMIIREG:
3597 case SIOCSIFNAME:
3598 if (!capable(CAP_NET_ADMIN))
3599 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003600 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003602 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603 rtnl_unlock();
3604 if (!ret) {
3605 if (colon)
3606 *colon = ':';
3607 if (copy_to_user(arg, &ifr,
3608 sizeof(struct ifreq)))
3609 ret = -EFAULT;
3610 }
3611 return ret;
3612
3613 /*
3614 * These ioctl calls:
3615 * - require superuser power.
3616 * - require strict serialization.
3617 * - do not return a value
3618 */
3619 case SIOCSIFFLAGS:
3620 case SIOCSIFMETRIC:
3621 case SIOCSIFMTU:
3622 case SIOCSIFMAP:
3623 case SIOCSIFHWADDR:
3624 case SIOCSIFSLAVE:
3625 case SIOCADDMULTI:
3626 case SIOCDELMULTI:
3627 case SIOCSIFHWBROADCAST:
3628 case SIOCSIFTXQLEN:
3629 case SIOCSMIIREG:
3630 case SIOCBONDENSLAVE:
3631 case SIOCBONDRELEASE:
3632 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003633 case SIOCBONDCHANGEACTIVE:
3634 case SIOCBRADDIF:
3635 case SIOCBRDELIF:
3636 if (!capable(CAP_NET_ADMIN))
3637 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08003638 /* fall through */
3639 case SIOCBONDSLAVEINFOQUERY:
3640 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003641 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003642 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003643 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003644 rtnl_unlock();
3645 return ret;
3646
3647 case SIOCGIFMEM:
3648 /* Get the per device memory space. We can add this but
3649 * currently do not support it */
3650 case SIOCSIFMEM:
3651 /* Set the per device memory buffer space.
3652 * Not applicable in our case */
3653 case SIOCSIFLINK:
3654 return -EINVAL;
3655
3656 /*
3657 * Unknown or private ioctl.
3658 */
3659 default:
3660 if (cmd == SIOCWANDEV ||
3661 (cmd >= SIOCDEVPRIVATE &&
3662 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003663 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003665 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666 rtnl_unlock();
3667 if (!ret && copy_to_user(arg, &ifr,
3668 sizeof(struct ifreq)))
3669 ret = -EFAULT;
3670 return ret;
3671 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07003673 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003674 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675 return -EINVAL;
3676 }
3677}
3678
3679
3680/**
3681 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003682 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683 *
3684 * Returns a suitable unique value for a new device interface
3685 * number. The caller must hold the rtnl semaphore or the
3686 * dev_base_lock to be sure it remains unique.
3687 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003688static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003689{
3690 static int ifindex;
3691 for (;;) {
3692 if (++ifindex <= 0)
3693 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003694 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695 return ifindex;
3696 }
3697}
3698
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699/* Delayed registration/unregistration */
3700static DEFINE_SPINLOCK(net_todo_list_lock);
Denis Cheng3b5b34f2007-12-07 00:49:17 -08003701static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702
Stephen Hemminger6f05f622007-03-08 20:46:03 -08003703static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704{
3705 spin_lock(&net_todo_list_lock);
3706 list_add_tail(&dev->todo_list, &net_todo_list);
3707 spin_unlock(&net_todo_list_lock);
3708}
3709
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003710static void rollback_registered(struct net_device *dev)
3711{
3712 BUG_ON(dev_boot_phase);
3713 ASSERT_RTNL();
3714
3715 /* Some devices call this without having registered, to unwind a failed initialization. */
3716 if (dev->reg_state == NETREG_UNINITIALIZED) {
3717 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3718 "was registered\n", dev->name, dev);
3719
3720 WARN_ON(1);
3721 return;
3722 }
3723
3724 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3725
3726 /* If device is running, close it first. */
3727 dev_close(dev);
3728
3729 /* And unlink it from device chain. */
3730 unlist_netdevice(dev);
3731
3732 dev->reg_state = NETREG_UNREGISTERING;
3733
3734 synchronize_net();
3735
3736 /* Shutdown queueing discipline. */
3737 dev_shutdown(dev);
3738
3739
3740 /* Notify protocols, that we are about to destroy
3741 this device. They should clean all the things.
3742 */
3743 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3744
3745 /*
3746 * Flush the unicast and multicast chains
3747 */
3748 dev_addr_discard(dev);
3749
3750 if (dev->uninit)
3751 dev->uninit(dev);
3752
3753 /* Notifier chain MUST detach us from master device. */
3754 BUG_TRAP(!dev->master);
3755
3756 /* Remove entries from kobject tree */
3757 netdev_unregister_kobject(dev);
3758
3759 synchronize_net();
3760
3761 dev_put(dev);
3762}
3763
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764/**
3765 * register_netdevice - register a network device
3766 * @dev: device to register
3767 *
3768 * Take a completed network device structure and add it to the kernel
3769 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3770 * chain. 0 is returned on success. A negative errno code is returned
3771 * on a failure to set up the device, or if the name is a duplicate.
3772 *
3773 * Callers must hold the rtnl semaphore. You may want
3774 * register_netdev() instead of this.
3775 *
3776 * BUGS:
3777 * The locking appears insufficient to guarantee two parallel registers
3778 * will not get the same name.
3779 */
3780
3781int register_netdevice(struct net_device *dev)
3782{
3783 struct hlist_head *head;
3784 struct hlist_node *p;
3785 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003786 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787
3788 BUG_ON(dev_boot_phase);
3789 ASSERT_RTNL();
3790
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003791 might_sleep();
3792
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793 /* When net_device's are persistent, this will be fatal. */
3794 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003795 BUG_ON(!dev_net(dev));
3796 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797
Herbert Xu932ff272006-06-09 12:20:56 -07003798 spin_lock_init(&dev->_xmit_lock);
Jarek Poplawski723e98b2007-05-15 22:46:18 -07003799 netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003800 dev->xmit_lock_owner = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802 dev->iflink = -1;
3803
3804 /* Init, if this function is available */
3805 if (dev->init) {
3806 ret = dev->init(dev);
3807 if (ret) {
3808 if (ret > 0)
3809 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08003810 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811 }
3812 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003813
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 if (!dev_valid_name(dev->name)) {
3815 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003816 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817 }
3818
Eric W. Biederman881d9662007-09-17 11:56:21 -07003819 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 if (dev->iflink == -1)
3821 dev->iflink = dev->ifindex;
3822
3823 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003824 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825 hlist_for_each(p, head) {
3826 struct net_device *d
3827 = hlist_entry(p, struct net_device, name_hlist);
3828 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3829 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003830 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003832 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833
Stephen Hemmingerd212f872007-06-27 00:47:37 -07003834 /* Fix illegal checksum combinations */
3835 if ((dev->features & NETIF_F_HW_CSUM) &&
3836 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3837 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3838 dev->name);
3839 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3840 }
3841
3842 if ((dev->features & NETIF_F_NO_CSUM) &&
3843 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3844 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3845 dev->name);
3846 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3847 }
3848
3849
Linus Torvalds1da177e2005-04-16 15:20:36 -07003850 /* Fix illegal SG+CSUM combinations. */
3851 if ((dev->features & NETIF_F_SG) &&
Herbert Xu8648b302006-06-17 22:06:05 -07003852 !(dev->features & NETIF_F_ALL_CSUM)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003853 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854 dev->name);
3855 dev->features &= ~NETIF_F_SG;
3856 }
3857
3858 /* TSO requires that SG is present as well. */
3859 if ((dev->features & NETIF_F_TSO) &&
3860 !(dev->features & NETIF_F_SG)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003861 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003862 dev->name);
3863 dev->features &= ~NETIF_F_TSO;
3864 }
Ananda Rajue89e9cf2005-10-18 15:46:41 -07003865 if (dev->features & NETIF_F_UFO) {
3866 if (!(dev->features & NETIF_F_HW_CSUM)) {
3867 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3868 "NETIF_F_HW_CSUM feature.\n",
3869 dev->name);
3870 dev->features &= ~NETIF_F_UFO;
3871 }
3872 if (!(dev->features & NETIF_F_SG)) {
3873 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3874 "NETIF_F_SG feature.\n",
3875 dev->name);
3876 dev->features &= ~NETIF_F_UFO;
3877 }
3878 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07003880 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07003881 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003882 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003883 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003884 dev->reg_state = NETREG_REGISTERED;
3885
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886 /*
3887 * Default initial state at registry is that the
3888 * device is present.
3889 */
3890
3891 set_bit(__LINK_STATE_PRESENT, &dev->state);
3892
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003894 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02003895 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896
3897 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003898 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07003899 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003900 if (ret) {
3901 rollback_registered(dev);
3902 dev->reg_state = NETREG_UNREGISTERED;
3903 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904
3905out:
3906 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003907
3908err_uninit:
3909 if (dev->uninit)
3910 dev->uninit(dev);
3911 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912}
3913
3914/**
3915 * register_netdev - register a network device
3916 * @dev: device to register
3917 *
3918 * Take a completed network device structure and add it to the kernel
3919 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3920 * chain. 0 is returned on success. A negative errno code is returned
3921 * on a failure to set up the device, or if the name is a duplicate.
3922 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07003923 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924 * and expands the device name if you passed a format string to
3925 * alloc_netdev.
3926 */
3927int register_netdev(struct net_device *dev)
3928{
3929 int err;
3930
3931 rtnl_lock();
3932
3933 /*
3934 * If the name is a format string the caller wants us to do a
3935 * name allocation.
3936 */
3937 if (strchr(dev->name, '%')) {
3938 err = dev_alloc_name(dev, dev->name);
3939 if (err < 0)
3940 goto out;
3941 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003942
Linus Torvalds1da177e2005-04-16 15:20:36 -07003943 err = register_netdevice(dev);
3944out:
3945 rtnl_unlock();
3946 return err;
3947}
3948EXPORT_SYMBOL(register_netdev);
3949
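/*
 * Illustrative sketch, not part of the original file: the usual
 * module-level pairing of alloc_netdev()/register_netdev() with
 * unregister_netdev()/free_netdev().  ether_setup() and the "%d"
 * name template are the standard conventions; the example_* names
 * are assumptions.
 */
static struct net_device *example_dev;

static int __init example_module_init(void)
{
	int err;

	example_dev = alloc_netdev(0, "example%d", ether_setup);
	if (!example_dev)
		return -ENOMEM;

	err = register_netdev(example_dev);	/* takes rtnl_lock itself */
	if (err)
		free_netdev(example_dev);
	return err;
}

static void __exit example_module_exit(void)
{
	unregister_netdev(example_dev);
	free_netdev(example_dev);
}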
3950/*
3951 * netdev_wait_allrefs - wait until all references are gone.
3952 *
3953 * This is called when unregistering network devices.
3954 *
3955 * Any protocol or device that holds a reference should register
3956 * for netdevice notification, and clean up and put back the
3957 * reference if they receive an UNREGISTER event.
3958 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003959 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960 */
3961static void netdev_wait_allrefs(struct net_device *dev)
3962{
3963 unsigned long rebroadcast_time, warning_time;
3964
3965 rebroadcast_time = warning_time = jiffies;
3966 while (atomic_read(&dev->refcnt) != 0) {
3967 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003968 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003969
3970 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003971 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003972
3973 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
3974 &dev->state)) {
3975 /* We must not have linkwatch events
3976 * pending on unregister. If this
3977 * happens, we simply run the queue
3978 * unscheduled, resulting in a noop
3979 * for this device.
3980 */
3981 linkwatch_run_queue();
3982 }
3983
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003984 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985
3986 rebroadcast_time = jiffies;
3987 }
3988
3989 msleep(250);
3990
3991 if (time_after(jiffies, warning_time + 10 * HZ)) {
3992 printk(KERN_EMERG "unregister_netdevice: "
3993 "waiting for %s to become free. Usage "
3994 "count = %d\n",
3995 dev->name, atomic_read(&dev->refcnt));
3996 warning_time = jiffies;
3997 }
3998 }
3999}
4000
4001/* The sequence is:
4002 *
4003 * rtnl_lock();
4004 * ...
4005 * register_netdevice(x1);
4006 * register_netdevice(x2);
4007 * ...
4008 * unregister_netdevice(y1);
4009 * unregister_netdevice(y2);
4010 * ...
4011 * rtnl_unlock();
4012 * free_netdev(y1);
4013 * free_netdev(y2);
4014 *
4015 * We are invoked by rtnl_unlock() after it drops the semaphore.
4016 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004017 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018 * without deadlocking with linkwatch via keventd.
4019 * 2) Since we run with the RTNL semaphore not held, we can sleep
4020 * safely in order to wait for the netdev refcnt to drop to zero.
4021 */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004022static DEFINE_MUTEX(net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023void netdev_run_todo(void)
4024{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004025 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026
4027 /* Need to guard against multiple cpu's getting out of order. */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004028 mutex_lock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029
4030 /* Not safe to do outside the semaphore. We must not return
4031 * until all unregister events invoked by the local processor
4032 * have been completed (either by this todo run, or one on
4033 * another cpu).
4034 */
4035 if (list_empty(&net_todo_list))
4036 goto out;
4037
4038 /* Snapshot list, allow later requests */
4039 spin_lock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004040 list_replace_init(&net_todo_list, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041 spin_unlock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004042
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043 while (!list_empty(&list)) {
4044 struct net_device *dev
4045 = list_entry(list.next, struct net_device, todo_list);
4046 list_del(&dev->todo_list);
4047
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004048 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049 printk(KERN_ERR "network todo '%s' but state %d\n",
4050 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004051 dump_stack();
4052 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004053 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004054
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004055 dev->reg_state = NETREG_UNREGISTERED;
4056
4057 netdev_wait_allrefs(dev);
4058
4059 /* paranoia */
4060 BUG_ON(atomic_read(&dev->refcnt));
4061 BUG_TRAP(!dev->ip_ptr);
4062 BUG_TRAP(!dev->ip6_ptr);
4063 BUG_TRAP(!dev->dn_ptr);
4064
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004065 if (dev->destructor)
4066 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07004067
4068 /* Free network device */
4069 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070 }
4071
4072out:
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004073 mutex_unlock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074}
4075
Rusty Russell5a1b5892007-04-28 21:04:03 -07004076static struct net_device_stats *internal_stats(struct net_device *dev)
Rusty Russellc45d2862007-03-28 14:29:08 -07004077{
Rusty Russell5a1b5892007-04-28 21:04:03 -07004078 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07004079}
4080
David S. Millerdc2b4842008-07-08 17:18:23 -07004081static void netdev_init_one_queue(struct net_device *dev,
4082 struct netdev_queue *queue)
4083{
4084 spin_lock_init(&queue->lock);
4085 queue->dev = dev;
4086}
4087
David S. Millerbb949fb2008-07-08 16:55:56 -07004088static void netdev_init_queues(struct net_device *dev)
4089{
David S. Millerdc2b4842008-07-08 17:18:23 -07004090 netdev_init_one_queue(dev, &dev->rx_queue);
4091 netdev_init_one_queue(dev, &dev->tx_queue);
David S. Millerbb949fb2008-07-08 16:55:56 -07004092}
4093
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004095 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096 * @sizeof_priv: size of private data to allocate space for
4097 * @name: device name format string
4098 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004099 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100 *
4101 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004102 * and performs basic initialization. Also allocates subqueue structs
4103 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004105struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4106 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004107{
4108 void *p;
4109 struct net_device *dev;
4110 int alloc_size;
4111
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004112 BUG_ON(strlen(name) >= sizeof(dev->name));
4113
Alexey Dobriyand1643d22008-04-18 15:43:32 -07004114 alloc_size = sizeof(struct net_device) +
4115 sizeof(struct net_device_subqueue) * (queue_count - 1);
4116 if (sizeof_priv) {
4117 /* ensure 32-byte alignment of private area */
4118 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4119 alloc_size += sizeof_priv;
4120 }
4121 /* ensure 32-byte alignment of whole construct */
4122 alloc_size += NETDEV_ALIGN_CONST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004123
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07004124 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004126 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127 return NULL;
4128 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129
4130 dev = (struct net_device *)
4131 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4132 dev->padded = (char *)dev - (char *)p;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004133 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004134
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004135 if (sizeof_priv) {
4136 dev->priv = ((char *)dev +
4137 ((sizeof(struct net_device) +
4138 (sizeof(struct net_device_subqueue) *
Patrick McHardy31ce72a2007-07-20 19:45:45 -07004139 (queue_count - 1)) + NETDEV_ALIGN_CONST)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004140 & ~NETDEV_ALIGN_CONST));
4141 }
4142
4143 dev->egress_subqueue_count = queue_count;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07004144 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145
David S. Millerbb949fb2008-07-08 16:55:56 -07004146 netdev_init_queues(dev);
4147
Rusty Russell5a1b5892007-04-28 21:04:03 -07004148 dev->get_stats = internal_stats;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004149 netpoll_netdev_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150 setup(dev);
4151 strcpy(dev->name, name);
4152 return dev;
4153}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004154EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155
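/*
 * Illustrative sketch, not part of the original file: allocating a
 * multiqueue device with a private area.  dev->priv is laid out after
 * the subqueue array as computed above, and netdev_priv() is the
 * accessor for it; struct example_mq_priv and the "emq%d" template
 * are assumptions.
 */
struct example_mq_priv {
	unsigned int txq_count;
};

static struct net_device *example_alloc_mq(unsigned int queues)
{
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct example_mq_priv), "emq%d",
			      ether_setup, queues);
	if (dev) {
		struct example_mq_priv *priv = netdev_priv(dev);
		priv->txq_count = queues;
	}
	return dev;
}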
4156/**
4157 * free_netdev - free network device
4158 * @dev: device
4159 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004160 * This function does the last stage of destroying an allocated device
4161 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162 * If this is the last reference then it will be freed.
4163 */
4164void free_netdev(struct net_device *dev)
4165{
Denis V. Lunevf3005d72008-04-16 02:02:18 -07004166 release_net(dev_net(dev));
4167
Stephen Hemminger3041a062006-05-26 13:25:24 -07004168 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169 if (dev->reg_state == NETREG_UNINITIALIZED) {
4170 kfree((char *)dev - dev->padded);
4171 return;
4172 }
4173
4174 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4175 dev->reg_state = NETREG_RELEASED;
4176
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07004177 /* will free via device release */
4178 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004180
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181/* Synchronize with packet receive processing. */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004182void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183{
4184 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07004185 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186}
4187
4188/**
4189 * unregister_netdevice - remove device from the kernel
4190 * @dev: device
4191 *
4192 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004193 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 *
4195 * Callers must hold the rtnl semaphore. You may want
4196 * unregister_netdev() instead of this.
4197 */
4198
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08004199void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200{
Herbert Xua6620712007-12-12 19:21:56 -08004201 ASSERT_RTNL();
4202
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004203 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204 /* Finish processing unregister after unlock */
4205 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004206}
4207
4208/**
4209 * unregister_netdev - remove device from the kernel
4210 * @dev: device
4211 *
4212 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004213 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214 *
4215 * This is just a wrapper for unregister_netdevice that takes
4216 * the rtnl semaphore. In general you want to use this and not
4217 * unregister_netdevice.
4218 */
4219void unregister_netdev(struct net_device *dev)
4220{
4221 rtnl_lock();
4222 unregister_netdevice(dev);
4223 rtnl_unlock();
4224}
4225
4226EXPORT_SYMBOL(unregister_netdev);
4227
Eric W. Biedermance286d32007-09-12 13:53:49 +02004228/**
4229 * dev_change_net_namespace - move device to a different network namespace
4230 * @dev: device
4231 * @net: network namespace
4232 * @pat: If not NULL name pattern to try if the current device name
4233 * is already taken in the destination network namespace.
4234 *
4235 * This function shuts down a device interface and moves it
4236 * to a new network namespace. On success 0 is returned, on
4237 * a failure a negative errno code is returned.
4238 *
4239 * Callers must hold the rtnl semaphore.
4240 */
4241
4242int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4243{
4244 char buf[IFNAMSIZ];
4245 const char *destname;
4246 int err;
4247
4248 ASSERT_RTNL();
4249
4250 /* Don't allow namespace local devices to be moved. */
4251 err = -EINVAL;
4252 if (dev->features & NETIF_F_NETNS_LOCAL)
4253 goto out;
4254
4255 /* Ensure the device has been registered */
4256 err = -EINVAL;
4257 if (dev->reg_state != NETREG_REGISTERED)
4258 goto out;
4259
4260 /* Get out if there is nothing to do */
4261 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09004262 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02004263 goto out;
4264
4265 /* Pick the destination device name, and ensure
4266 * we can use it in the destination network namespace.
4267 */
4268 err = -EEXIST;
4269 destname = dev->name;
4270 if (__dev_get_by_name(net, destname)) {
4271 /* We get here if we can't use the current device name */
4272 if (!pat)
4273 goto out;
4274 if (!dev_valid_name(pat))
4275 goto out;
4276 if (strchr(pat, '%')) {
4277 if (__dev_alloc_name(net, pat, buf) < 0)
4278 goto out;
4279 destname = buf;
4280 } else
4281 destname = pat;
4282 if (__dev_get_by_name(net, destname))
4283 goto out;
4284 }
4285
4286 /*
4287 * And now a mini version of register_netdevice and unregister_netdevice.
4288 */
4289
4290 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07004291 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004292
4293 /* And unlink it from device chain */
4294 err = -ENODEV;
4295 unlist_netdevice(dev);
4296
4297 synchronize_net();
4298
4299 /* Shutdown queueing discipline. */
4300 dev_shutdown(dev);
4301
4302 /* Notify protocols, that we are about to destroy
4303 this device. They should clean all the things.
4304 */
4305 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4306
4307 /*
4308 * Flush the unicast and multicast chains
4309 */
4310 dev_addr_discard(dev);
4311
4312 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004313 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004314
4315 /* Assign the new device name */
4316 if (destname != dev->name)
4317 strcpy(dev->name, destname);
4318
4319 /* If there is an ifindex conflict assign a new one */
4320 if (__dev_get_by_index(net, dev->ifindex)) {
4321 int iflink = (dev->iflink == dev->ifindex);
4322 dev->ifindex = dev_new_index(net);
4323 if (iflink)
4324 dev->iflink = dev->ifindex;
4325 }
4326
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004327 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004328 netdev_unregister_kobject(dev);
4329 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004330 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004331
4332 /* Add the device back in the hashes */
4333 list_netdevice(dev);
4334
4335 /* Notify protocols, that a new device appeared. */
4336 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4337
4338 synchronize_net();
4339 err = 0;
4340out:
4341 return err;
4342}
4343
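/*
 * Illustrative sketch, not part of the original file: moving a device
 * into another namespace, roughly what the rtnetlink IFLA_NET_NS_PID
 * handler does.  The "eth%d" pattern is only tried if the current
 * name clashes in the target namespace; example_move_to_ns() is a
 * hypothetical helper and the caller is assumed to hold a reference
 * on @net.
 */
static int example_move_to_ns(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	return err;
}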
Linus Torvalds1da177e2005-04-16 15:20:36 -07004344static int dev_cpu_callback(struct notifier_block *nfb,
4345 unsigned long action,
4346 void *ocpu)
4347{
4348 struct sk_buff **list_skb;
David S. Milleree609cb2008-07-08 22:58:37 -07004349 struct netdev_queue **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350 struct sk_buff *skb;
4351 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4352 struct softnet_data *sd, *oldsd;
4353
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07004354 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355 return NOTIFY_OK;
4356
4357 local_irq_disable();
4358 cpu = smp_processor_id();
4359 sd = &per_cpu(softnet_data, cpu);
4360 oldsd = &per_cpu(softnet_data, oldcpu);
4361
4362 /* Find end of our completion_queue. */
4363 list_skb = &sd->completion_queue;
4364 while (*list_skb)
4365 list_skb = &(*list_skb)->next;
4366 /* Append completion queue from offline CPU. */
4367 *list_skb = oldsd->completion_queue;
4368 oldsd->completion_queue = NULL;
4369
4370 /* Find end of our output_queue. */
4371 list_net = &sd->output_queue;
4372 while (*list_net)
4373 list_net = &(*list_net)->next_sched;
4374 /* Append output queue from offline CPU. */
4375 *list_net = oldsd->output_queue;
4376 oldsd->output_queue = NULL;
4377
4378 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4379 local_irq_enable();
4380
4381 /* Process offline CPU's input_pkt_queue */
4382 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4383 netif_rx(skb);
4384
4385 return NOTIFY_OK;
4386}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387
#ifdef CONFIG_NET_DMA
/**
 * net_dma_rebalance - try to maintain one DMA channel per CPU
 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
 *
 * This is called when the number of channels allocated to the net_dma client
 * changes. The net_dma client tries to have one DMA channel per CPU.
 */
static void net_dma_rebalance(struct net_dma *net_dma)
{
	unsigned int cpu, i, n, chan_idx;
	struct dma_chan *chan;

	if (cpus_empty(net_dma->channel_mask)) {
		for_each_online_cpu(cpu)
			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
		return;
	}

	i = 0;
	cpu = first_cpu(cpu_online_map);

	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
		chan = net_dma->channels[chan_idx];

		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
		   + (i < (num_online_cpus() %
			cpus_weight(net_dma->channel_mask)) ? 1 : 0));

		while (n) {
			per_cpu(softnet_data, cpu).net_dma = chan;
			cpu = next_cpu(cpu, cpu_online_map);
			n--;
		}
		i++;
	}
}

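/*
 * Worked example of the distribution above (illustrative): with 8 online
 * CPUs and 3 channels, num_online_cpus() / cpus_weight(...) == 2 with a
 * remainder of 2, so the first two channels each serve 3 CPUs and the
 * third serves 2 -- as close to an even channel-per-CPU split as the
 * available hardware allows.
 */
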
/**
 * netdev_dma_event - event callback for the net_dma_client
 * @client: should always be net_dma_client
 * @chan: DMA channel for the event
 * @state: DMA state to be handled
 */
static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state)
{
	int i, found = 0, pos = -1;
	struct net_dma *net_dma =
		container_of(client, struct net_dma, client);
	enum dma_state_client ack = DMA_DUP; /* default: take no action */

	spin_lock(&net_dma->lock);
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		for (i = 0; i < nr_cpu_ids; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				break;
			} else if (net_dma->channels[i] == NULL && pos < 0)
				pos = i;

		if (!found && pos >= 0) {
			ack = DMA_ACK;
			net_dma->channels[pos] = chan;
			cpu_set(pos, net_dma->channel_mask);
			net_dma_rebalance(net_dma);
		}
		break;
	case DMA_RESOURCE_REMOVED:
		for (i = 0; i < nr_cpu_ids; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				pos = i;
				break;
			}

		if (found) {
			ack = DMA_ACK;
			cpu_clear(pos, net_dma->channel_mask);
			net_dma->channels[pos] = NULL;
			net_dma_rebalance(net_dma);
		}
		break;
	default:
		break;
	}
	spin_unlock(&net_dma->lock);

	return ack;
}

/**
 * netdev_dma_register - register the networking subsystem as a DMA client
 */
static int __init netdev_dma_register(void)
{
	/* One channel-pointer slot per possible CPU. */
	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(*net_dma.channels),
				   GFP_KERNEL);
	if (unlikely(!net_dma.channels)) {
		printk(KERN_NOTICE
			"netdev_dma: no memory for net_dma.channels\n");
		return -ENOMEM;
	}
	spin_lock_init(&net_dma.lock);
	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
	dma_async_client_register(&net_dma.client);
	dma_async_client_chan_request(&net_dma.client);
	return 0;
}

#else
static int __init netdev_dma_register(void) { return -ENODEV; }
#endif /* CONFIG_NET_DMA */

/**
 * netdev_compute_features - compute conjunction of two feature sets
 * @all: first feature set
 * @one: second feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Returns
 * the new feature set.
 */
int netdev_compute_features(unsigned long all, unsigned long one)
{
	/* if device needs checksumming, downgrade to hw checksumming */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;

	/* if device can't do all checksum, downgrade to ipv4/ipv6 */
	if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
		all ^= NETIF_F_HW_CSUM
			| NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (one & NETIF_F_GSO)
		one |= NETIF_F_GSO_SOFTWARE;
	one |= NETIF_F_GSO;

	/* If even one device supports robust GSO, enable it for all. */
	if (one & NETIF_F_GSO_ROBUST)
		all |= NETIF_F_GSO_ROBUST;

	all &= one | NETIF_F_LLTX;

	if (!(all & NETIF_F_ALL_CSUM))
		all &= ~NETIF_F_SG;
	if (!(all & NETIF_F_SG))
		all &= ~NETIF_F_GSO_MASK;

	return all;
}
EXPORT_SYMBOL(netdev_compute_features);

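/*
 * Illustrative sketch (not part of this file): a master driver such as
 * bonding might fold each slave's feature set into its own by repeated
 * application of netdev_compute_features(). The slaves[] array, nslaves
 * count, master pointer and the all-ones starting mask below are all
 * hypothetical.
 *
 *	unsigned long features = ~0UL;
 *	int i;
 *
 *	for (i = 0; i < nslaves; i++)
 *		features = netdev_compute_features(features,
 *						   slaves[i]->features);
 *	master->features = features;
 */
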
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

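/*
 * Aside (illustrative sketch only): a table built by netdev_create_hash()
 * is typically indexed by masking a name or ifindex hash down to
 * NETDEV_HASHENTRIES buckets, along these lines; the helper actually used
 * elsewhere in this file may differ in detail:
 *
 *	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
 *	struct hlist_head *head =
 *		&net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
 */
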
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *next;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, next) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
				__func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
	}

	netdev_dma_register();

	dev_boot_phase = 0;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif

EXPORT_PER_CPU_SYMBOL(softnet_data);
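
/*
 * Illustrative sketch (not part of this file): a module can use the
 * exported helpers above to look up and hold a device; the lookup takes
 * a reference which must be dropped with dev_put(). The interface name
 * "eth0" is purely hypothetical.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		printk(KERN_INFO "found %s, mtu %u\n", dev->name, dev->mtu);
 *		dev_put(dev);
 *	}
 */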