/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include "net-sysfs.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

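/*
 * Illustrative sketch (not part of the original file): a handler for any
 * protocol other than ETH_P_ALL is filed in ptype_base under the low bits
 * of its protocol number, e.g. for ETH_P_IP (0x0800):
 *
 *	int bucket = ntohs(htons(ETH_P_IP)) & PTYPE_HASH_MASK;	 == 0x0800 & 0xf == 0
 *
 * ETH_P_ALL handlers bypass the hash entirely and live on the ptype_all
 * list of taps.
 */
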
#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client client;
	spinlock_t lock;
	cpumask_t channel_mask;
	struct dma_chan **channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

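/*
 * Usage sketch (illustrative only, not part of the original file): a
 * protocol module typically declares a static struct packet_type and
 * registers it from its init routine.  The names my_proto_rcv and
 * my_proto_ptype below are hypothetical.
 *
 *	static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
 *				struct packet_type *pt,
 *				struct net_device *orig_dev)
 *	{
 *		... process skb, then kfree_skb(skb) ...
 *		return 0;
 *	}
 *
 *	static struct packet_type my_proto_ptype = {
 *		.type = __constant_htons(ETH_P_IP),	(or a driver-specific type)
 *		.func = my_proto_rcv,
 *	};
 *
 *	dev_add_pack(&my_proto_ptype);		(module init)
 *	dev_remove_pack(&my_proto_ptype);	(module exit)
 */
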
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

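/*
 * Example (illustrative only, values are made up): a kernel command line
 * such as
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth1
 *
 * is parsed by netdev_boot_setup() above: get_options() consumes the
 * leading integers (irq, base_addr, mem_start, mem_end, in that order),
 * the remaining string ("eth1") names the device, and the resulting ifmap
 * is stored via netdev_boot_setup_add() for netdev_boot_setup_check() to
 * apply later during device probing.
 */
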
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

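/*
 * Usage sketch (illustrative only): a caller looks a device up by name,
 * uses it, and drops the acquired reference with dev_put():
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 *
 * Callers already holding the RTNL or dev_base_lock can use
 * __dev_get_by_name() instead and skip the refcounting.
 */
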
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

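/*
 * For illustration (not part of the original file): dev_valid_name()
 * accepts names such as "eth0", but rejects "", ".", "..", anything of
 * IFNAMSIZ characters or more, and anything containing '/' or
 * whitespace, e.g. "eth 0" or "my/dev".
 */
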
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}


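/*
 * Example (illustrative only): a driver registering devices with a
 * wildcard name typically does
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *
 * which, with eth0 and eth1 already present, fills dev->name with "eth2"
 * and returns 2; a negative errno is returned on failure.
 */
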
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

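/*
 * Usage sketch (illustrative only): dev_open() and dev_close() must be
 * called with the RTNL held, e.g.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 *
 * On success the NETDEV_UP notification has been sent and IFF_UP is set
 * in dev->flags.
 */
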
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

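/*
 * Usage sketch (illustrative only, names are hypothetical): a subsystem
 * interested in device events registers a notifier_block whose callback
 * receives the event value and the affected net_device:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_notifier);
 */
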
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


void __netif_schedule(struct netdev_queue *txq)
{
	struct net_device *dev = txq->dev;

	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		txq->next_sched = sd->output_queue;
		sd->output_queue = txq;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

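/*
 * For illustration (not part of the original file): a driver that sets
 * NETIF_F_IP_CSUM in dev->features makes dev_can_checksum() return true
 * for an skb with skb->protocol == htons(ETH_P_IP), so the stack can
 * leave the checksum to hardware; the same packet behind an 802.1Q tag
 * is only treated as offloadable if dev->vlan_features carries the bit
 * as well.
 */
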
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422/*
1423 * Invalidate hardware checksum when packet is to be mangled, and
1424 * complete checksum manually on outgoing path.
1425 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001426int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427{
Al Virod3bc23e2006-11-14 21:24:49 -08001428 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001429 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430
Patrick McHardy84fa7932006-08-29 16:44:56 -07001431 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001432 goto out_set_summed;
1433
1434 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001435 /* Let GSO fix up the checksum. */
1436 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 }
1438
Herbert Xua0308472007-10-15 01:47:15 -07001439 offset = skb->csum_start - skb_headroom(skb);
1440 BUG_ON(offset >= skb_headlen(skb));
1441 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1442
1443 offset += skb->csum_offset;
1444 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1445
1446 if (skb_cloned(skb) &&
1447 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1449 if (ret)
1450 goto out;
1451 }
1452
Herbert Xua0308472007-10-15 01:47:15 -07001453 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001454out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001456out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 return ret;
1458}
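/*
 * Illustrative sketch, not part of the original file: a caller that is
 * about to modify the payload of a CHECKSUM_PARTIAL skb resolves the
 * pending checksum in software first, much as dev_queue_xmit() does
 * below when the device cannot offload it. example_mangle() is a
 * hypothetical placeholder.
 */
static int example_prepare_for_mangle(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int err = skb_checksum_help(skb);
		if (err)
			return err;
	}

	/* example_mangle(skb); the checksum is now complete in the data */
	return 0;
}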
1459
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001460/**
1461 * skb_gso_segment - Perform segmentation on skb.
1462 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001463 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001464 *
1465 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001466 *
1467 * It may return NULL if the skb requires no segmentation. This is
1468 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001469 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001470struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001471{
1472 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1473 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001474 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001475 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001476
1477 BUG_ON(skb_shinfo(skb)->frag_list);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001478
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001479 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001480 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001481 __skb_pull(skb, skb->mac_len);
1482
Herbert Xuf9d106a2007-04-23 22:36:13 -07001483 if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001484 if (skb_header_cloned(skb) &&
1485 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1486 return ERR_PTR(err);
1487 }
1488
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001489 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001490 list_for_each_entry_rcu(ptype,
1491 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001492 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001493 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001494 err = ptype->gso_send_check(skb);
1495 segs = ERR_PTR(err);
1496 if (err || skb_gso_ok(skb, features))
1497 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001498 __skb_push(skb, (skb->data -
1499 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001500 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001501 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001502 break;
1503 }
1504 }
1505 rcu_read_unlock();
1506
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001507 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001508
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001509 return segs;
1510}
1511
1512EXPORT_SYMBOL(skb_gso_segment);
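/*
 * Illustrative sketch, not part of the original file: the general shape
 * of a skb_gso_segment() caller. The real in-file user is
 * dev_gso_segment() below; this condensed form only shows the
 * NULL / IS_ERR / segment-list handling.
 */
static int example_software_gso(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = skb_gso_segment(skb, features);

	if (!segs)		/* header verification only, nothing to send */
		return 0;
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		kfree_skb(nskb);	/* a real caller would transmit nskb */
	}

	/* The original skb is still owned by the caller at this point. */
	return 0;
}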
1513
Herbert Xufb286bb2005-11-10 13:01:24 -08001514/* Take action when hardware reception checksum errors are detected. */
1515#ifdef CONFIG_BUG
1516void netdev_rx_csum_fault(struct net_device *dev)
1517{
1518 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001519 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001520 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001521 dump_stack();
1522 }
1523}
1524EXPORT_SYMBOL(netdev_rx_csum_fault);
1525#endif
1526
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527/* Actually, we should eliminate this check as soon as we know that:
 1528 * 1. An IOMMU is present and allows mapping all of the memory.
1529 * 2. No high memory really exists on this machine.
1530 */
1531
1532static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1533{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001534#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 int i;
1536
1537 if (dev->features & NETIF_F_HIGHDMA)
1538 return 0;
1539
1540 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1541 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1542 return 1;
1543
Herbert Xu3d3a8532006-06-27 13:33:10 -07001544#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 return 0;
1546}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001548struct dev_gso_cb {
1549 void (*destructor)(struct sk_buff *skb);
1550};
1551
1552#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1553
1554static void dev_gso_skb_destructor(struct sk_buff *skb)
1555{
1556 struct dev_gso_cb *cb;
1557
1558 do {
1559 struct sk_buff *nskb = skb->next;
1560
1561 skb->next = nskb->next;
1562 nskb->next = NULL;
1563 kfree_skb(nskb);
1564 } while (skb->next);
1565
1566 cb = DEV_GSO_CB(skb);
1567 if (cb->destructor)
1568 cb->destructor(skb);
1569}
1570
1571/**
1572 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1573 * @skb: buffer to segment
1574 *
1575 * This function segments the given skb and stores the list of segments
1576 * in skb->next.
1577 */
1578static int dev_gso_segment(struct sk_buff *skb)
1579{
1580 struct net_device *dev = skb->dev;
1581 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001582 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1583 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001584
Herbert Xu576a30e2006-06-27 13:22:38 -07001585 segs = skb_gso_segment(skb, features);
1586
1587 /* Verifying header integrity only. */
1588 if (!segs)
1589 return 0;
1590
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001591 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001592 return PTR_ERR(segs);
1593
1594 skb->next = segs;
1595 DEV_GSO_CB(skb)->destructor = skb->destructor;
1596 skb->destructor = dev_gso_skb_destructor;
1597
1598 return 0;
1599}
1600
1601int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1602{
1603 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001604 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001605 dev_queue_xmit_nit(skb, dev);
1606
Herbert Xu576a30e2006-06-27 13:22:38 -07001607 if (netif_needs_gso(dev, skb)) {
1608 if (unlikely(dev_gso_segment(skb)))
1609 goto out_kfree_skb;
1610 if (skb->next)
1611 goto gso;
1612 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001613
Herbert Xu576a30e2006-06-27 13:22:38 -07001614 return dev->hard_start_xmit(skb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001615 }
1616
Herbert Xu576a30e2006-06-27 13:22:38 -07001617gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001618 do {
1619 struct sk_buff *nskb = skb->next;
1620 int rc;
1621
1622 skb->next = nskb->next;
1623 nskb->next = NULL;
1624 rc = dev->hard_start_xmit(nskb, dev);
1625 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001626 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001627 skb->next = nskb;
1628 return rc;
1629 }
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001630 if (unlikely((netif_queue_stopped(dev) ||
Pavel Emelyanov668f8952007-10-21 17:01:56 -07001631 netif_subqueue_stopped(dev, skb)) &&
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001632 skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001633 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001634 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001635
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001636 skb->destructor = DEV_GSO_CB(skb)->destructor;
1637
1638out_kfree_skb:
1639 kfree_skb(skb);
1640 return 0;
1641}
1642
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643/**
1644 * dev_queue_xmit - transmit a buffer
1645 * @skb: buffer to transmit
1646 *
1647 * Queue a buffer for transmission to a network device. The caller must
1648 * have set the device and priority and built the buffer before calling
1649 * this function. The function can be called from an interrupt.
1650 *
1651 * A negative errno code is returned on a failure. A success does not
1652 * guarantee the frame will be transmitted as it may be dropped due
1653 * to congestion or traffic shaping.
Ben Greearaf191362005-04-24 20:12:36 -07001654 *
1655 * -----------------------------------------------------------------------------------
1656 * I notice this method can also return errors from the queue disciplines,
1657 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1658 * be positive.
1659 *
 1660 * Regardless of the return value, the skb is consumed, so it is currently
 1661 * difficult to retry a failed send with this method. (You can bump the ref
 1662 * count before sending to hold a reference for a retry if you are careful.)
1663 *
1664 * When calling this method, interrupts MUST be enabled. This is because
1665 * the BH enable code must have IRQs enabled so that it will not deadlock.
1666 * --BLG
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 */
1668
David S. Millere8a04642008-07-17 00:34:19 -07001669static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1670 struct sk_buff *skb)
1671{
1672 return netdev_get_tx_queue(dev, 0);
1673}
1674
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675int dev_queue_xmit(struct sk_buff *skb)
1676{
1677 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001678 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 struct Qdisc *q;
1680 int rc = -ENOMEM;
1681
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001682 /* GSO will handle the following emulations directly. */
1683 if (netif_needs_gso(dev, skb))
1684 goto gso;
1685
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 if (skb_shinfo(skb)->frag_list &&
1687 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001688 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 goto out_kfree_skb;
1690
1691 /* Fragmented skb is linearized if device does not support SG,
1692 * or if at least one of fragments is in highmem and device
1693 * does not support DMA from it.
1694 */
1695 if (skb_shinfo(skb)->nr_frags &&
1696 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001697 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 goto out_kfree_skb;
1699
1700 /* If packet is not checksummed and device does not support
1701 * checksumming for this protocol, complete checksumming here.
1702 */
Herbert Xu663ead32007-04-09 11:59:07 -07001703 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1704 skb_set_transport_header(skb, skb->csum_start -
1705 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001706 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1707 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001708 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001710gso:
David S. Millere8a04642008-07-17 00:34:19 -07001711 txq = dev_pick_tx(dev, skb);
David S. Millerdc2b4842008-07-08 17:18:23 -07001712 spin_lock_prefetch(&txq->lock);
Eric Dumazet2d7ceec2005-09-27 15:22:58 -07001713
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001714 /* Disable soft irqs for various locks below. Also
1715 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001717 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
David S. Millerdc2b4842008-07-08 17:18:23 -07001719 /* Updates of qdisc are serialized by queue->lock.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001720 * The struct Qdisc which is pointed to by qdisc is now a
1721 * rcu structure - it may be accessed without acquiring
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 * a lock (but the structure may be stale.) The freeing of the
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001723 * qdisc will be deferred until it's known that there are no
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 * more references to it.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001725 *
1726 * If the qdisc has an enqueue function, we still need to
David S. Millerdc2b4842008-07-08 17:18:23 -07001727 * hold the queue->lock before calling it, since queue->lock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 * also serializes access to the device queue.
1729 */
1730
David S. Millerb0e1e642008-07-08 17:42:10 -07001731 q = rcu_dereference(txq->qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732#ifdef CONFIG_NET_CLS_ACT
1733 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1734#endif
1735 if (q->enqueue) {
1736 /* Grab device queue */
David S. Millerdc2b4842008-07-08 17:18:23 -07001737 spin_lock(&txq->lock);
David S. Millerb0e1e642008-07-08 17:42:10 -07001738 q = txq->qdisc;
Patrick McHardy85670cc2006-09-27 16:45:45 -07001739 if (q->enqueue) {
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001740 /* reset queue_mapping to zero */
Pavel Emelyanovdfa40912007-10-21 16:57:55 -07001741 skb_set_queue_mapping(skb, 0);
Patrick McHardy85670cc2006-09-27 16:45:45 -07001742 rc = q->enqueue(skb, q);
David S. Millereb6aafe2008-07-08 23:12:38 -07001743 qdisc_run(txq);
David S. Millerdc2b4842008-07-08 17:18:23 -07001744 spin_unlock(&txq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
Patrick McHardy85670cc2006-09-27 16:45:45 -07001746 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1747 goto out;
1748 }
David S. Millerdc2b4842008-07-08 17:18:23 -07001749 spin_unlock(&txq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 }
1751
 1752 /* The device has no queue. Common case for software devices:
 1753 loopback and all sorts of tunnels...
 1754
Herbert Xu932ff272006-06-09 12:20:56 -07001755 Really, it is unlikely that netif_tx_lock protection is necessary
 1756 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 counters.)
 1758 However, it is possible that they rely on the protection
 1759 we provide here.
 1760
 1761 Check this and take the lock. It is not prone to deadlocks.
 1762 Or take the noqueue qdisc path; it is even simpler 8)
1763 */
1764 if (dev->flags & IFF_UP) {
1765 int cpu = smp_processor_id(); /* ok because BHs are off */
1766
David S. Millerc773e842008-07-08 23:13:53 -07001767 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
David S. Millerc773e842008-07-08 23:13:53 -07001769 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001771 if (!netif_queue_stopped(dev) &&
Pavel Emelyanov668f8952007-10-21 17:01:56 -07001772 !netif_subqueue_stopped(dev, skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 rc = 0;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001774 if (!dev_hard_start_xmit(skb, dev)) {
David S. Millerc773e842008-07-08 23:13:53 -07001775 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 goto out;
1777 }
1778 }
David S. Millerc773e842008-07-08 23:13:53 -07001779 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 if (net_ratelimit())
1781 printk(KERN_CRIT "Virtual device %s asks to "
1782 "queue packet!\n", dev->name);
1783 } else {
1784 /* Recursion is detected! It is possible,
1785 * unfortunately */
1786 if (net_ratelimit())
1787 printk(KERN_CRIT "Dead loop on virtual device "
1788 "%s, fix it urgently!\n", dev->name);
1789 }
1790 }
1791
1792 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001793 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794
1795out_kfree_skb:
1796 kfree_skb(skb);
1797 return rc;
1798out:
Herbert Xud4828d82006-06-22 02:28:18 -07001799 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 return rc;
1801}
1802
1803
1804/*=======================================================================
1805 Receiver routines
1806 =======================================================================*/
1807
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001808int netdev_max_backlog __read_mostly = 1000;
1809int netdev_budget __read_mostly = 300;
1810int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811
1812DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1813
1814
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815/**
1816 * netif_rx - post buffer to the network code
1817 * @skb: buffer to post
1818 *
1819 * This function receives a packet from a device driver and queues it for
1820 * the upper (protocol) levels to process. It always succeeds. The buffer
1821 * may be dropped during processing for congestion control or by the
1822 * protocol layers.
1823 *
1824 * return values:
1825 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 * NET_RX_DROP (packet was dropped)
1827 *
1828 */
1829
1830int netif_rx(struct sk_buff *skb)
1831{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 struct softnet_data *queue;
1833 unsigned long flags;
1834
1835 /* if netpoll wants it, pretend we never saw it */
1836 if (netpoll_rx(skb))
1837 return NET_RX_DROP;
1838
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001839 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001840 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841
1842 /*
 1843 * The code is rearranged so that the path is shortest when the
 1844 * CPU is congested but still operating.
1845 */
1846 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 queue = &__get_cpu_var(softnet_data);
1848
1849 __get_cpu_var(netdev_rx_stat).total++;
1850 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1851 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852enqueue:
1853 dev_hold(skb->dev);
1854 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001856 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 }
1858
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001859 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 goto enqueue;
1861 }
1862
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 __get_cpu_var(netdev_rx_stat).dropped++;
1864 local_irq_restore(flags);
1865
1866 kfree_skb(skb);
1867 return NET_RX_DROP;
1868}
1869
1870int netif_rx_ni(struct sk_buff *skb)
1871{
1872 int err;
1873
1874 preempt_disable();
1875 err = netif_rx(skb);
1876 if (local_softirq_pending())
1877 do_softirq();
1878 preempt_enable();
1879
1880 return err;
1881}
1882
1883EXPORT_SYMBOL(netif_rx_ni);
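/*
 * Illustrative sketch, not part of the original file: a non-NAPI
 * receive path for a hypothetical Ethernet device. netif_rx() is the
 * interrupt-context entry point; a virtual device feeding frames from
 * process context would call netif_rx_ni() instead so that pending
 * softirqs get a chance to run. Assumes <linux/etherdevice.h> for
 * eth_type_trans(); example_rx_frame() is an invented name.
 */
static void example_rx_frame(struct net_device *dev, const void *data,
			     unsigned int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);

	netif_rx(skb);		/* from hard-IRQ context */
	/* netif_rx_ni(skb); from process context */
}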
1884
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001885static inline struct net_device *skb_bond(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886{
1887 struct net_device *dev = skb->dev;
1888
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001889 if (dev->master) {
David S. Miller7ea49ed2006-08-14 17:08:36 -07001890 if (skb_bond_should_drop(skb)) {
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001891 kfree_skb(skb);
1892 return NULL;
1893 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 skb->dev = dev->master;
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001895 }
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001896
1897 return dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898}
1899
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001900
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901static void net_tx_action(struct softirq_action *h)
1902{
1903 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1904
1905 if (sd->completion_queue) {
1906 struct sk_buff *clist;
1907
1908 local_irq_disable();
1909 clist = sd->completion_queue;
1910 sd->completion_queue = NULL;
1911 local_irq_enable();
1912
1913 while (clist) {
1914 struct sk_buff *skb = clist;
1915 clist = clist->next;
1916
1917 BUG_TRAP(!atomic_read(&skb->users));
1918 __kfree_skb(skb);
1919 }
1920 }
1921
1922 if (sd->output_queue) {
David S. Milleree609cb2008-07-08 22:58:37 -07001923 struct netdev_queue *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924
1925 local_irq_disable();
1926 head = sd->output_queue;
1927 sd->output_queue = NULL;
1928 local_irq_enable();
1929
1930 while (head) {
David S. Milleree609cb2008-07-08 22:58:37 -07001931 struct netdev_queue *txq = head;
1932 struct net_device *dev = txq->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 head = head->next_sched;
1934
1935 smp_mb__before_clear_bit();
1936 clear_bit(__LINK_STATE_SCHED, &dev->state);
1937
David S. Millerdc2b4842008-07-08 17:18:23 -07001938 if (spin_trylock(&txq->lock)) {
David S. Millereb6aafe2008-07-08 23:12:38 -07001939 qdisc_run(txq);
David S. Millerdc2b4842008-07-08 17:18:23 -07001940 spin_unlock(&txq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 } else {
David S. Miller86d804e2008-07-08 23:11:25 -07001942 netif_schedule_queue(txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943 }
1944 }
1945 }
1946}
1947
Stephen Hemminger6f05f622007-03-08 20:46:03 -08001948static inline int deliver_skb(struct sk_buff *skb,
1949 struct packet_type *pt_prev,
1950 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951{
1952 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001953 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954}
1955
1956#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Stephen Hemminger6229e362007-03-21 13:38:47 -07001957/* These hooks defined here for ATM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958struct net_bridge;
1959struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
1960 unsigned char *addr);
Stephen Hemminger6229e362007-03-21 13:38:47 -07001961void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962
Stephen Hemminger6229e362007-03-21 13:38:47 -07001963/*
1964 * If bridge module is loaded call bridging hook.
1965 * returns NULL if packet was consumed.
1966 */
1967struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
1968 struct sk_buff *skb) __read_mostly;
1969static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
1970 struct packet_type **pt_prev, int *ret,
1971 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972{
1973 struct net_bridge_port *port;
1974
Stephen Hemminger6229e362007-03-21 13:38:47 -07001975 if (skb->pkt_type == PACKET_LOOPBACK ||
1976 (port = rcu_dereference(skb->dev->br_port)) == NULL)
1977 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978
1979 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07001980 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001982 }
1983
Stephen Hemminger6229e362007-03-21 13:38:47 -07001984 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985}
1986#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07001987#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988#endif
1989
Patrick McHardyb863ceb2007-07-14 18:55:06 -07001990#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
1991struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
1992EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
1993
1994static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
1995 struct packet_type **pt_prev,
1996 int *ret,
1997 struct net_device *orig_dev)
1998{
1999 if (skb->dev->macvlan_port == NULL)
2000 return skb;
2001
2002 if (*pt_prev) {
2003 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2004 *pt_prev = NULL;
2005 }
2006 return macvlan_handle_frame_hook(skb);
2007}
2008#else
2009#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2010#endif
2011
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012#ifdef CONFIG_NET_CLS_ACT
 2013/* TODO: Maybe we should just force sch_ingress to be compiled in
 2014 * when CONFIG_NET_CLS_ACT is? Otherwise we execute some useless
 2015 * instructions (a compare and 2 extra stores) right now when it is
 2016 * not enabled but CONFIG_NET_CLS_ACT is.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002017 * NOTE: This doesn't stop any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 * the ingress scheduler, you just can't add policies on ingress.
2019 *
2020 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002021static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002024 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002025 struct netdev_queue *rxq;
2026 int result = TC_ACT_OK;
2027 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002028
Herbert Xuf697c3e2007-10-14 00:38:47 -07002029 if (MAX_RED_LOOP < ttl++) {
2030 printk(KERN_WARNING
2031 "Redir loop detected Dropping packet (%d->%d)\n",
2032 skb->iif, dev->ifindex);
2033 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 }
2035
Herbert Xuf697c3e2007-10-14 00:38:47 -07002036 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2037 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2038
David S. Miller555353c2008-07-08 17:33:13 -07002039 rxq = &dev->rx_queue;
2040
2041 spin_lock(&rxq->lock);
David S. Miller816f3252008-07-08 22:49:00 -07002042 if ((q = rxq->qdisc) != NULL)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002043 result = q->enqueue(skb, q);
David S. Miller555353c2008-07-08 17:33:13 -07002044 spin_unlock(&rxq->lock);
Herbert Xuf697c3e2007-10-14 00:38:47 -07002045
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 return result;
2047}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002048
2049static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2050 struct packet_type **pt_prev,
2051 int *ret, struct net_device *orig_dev)
2052{
David S. Miller816f3252008-07-08 22:49:00 -07002053 if (!skb->dev->rx_queue.qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002054 goto out;
2055
2056 if (*pt_prev) {
2057 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2058 *pt_prev = NULL;
2059 } else {
2060 /* Huh? Why does turning on AF_PACKET affect this? */
2061 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2062 }
2063
2064 switch (ing_filter(skb)) {
2065 case TC_ACT_SHOT:
2066 case TC_ACT_STOLEN:
2067 kfree_skb(skb);
2068 return NULL;
2069 }
2070
2071out:
2072 skb->tc_verd = 0;
2073 return skb;
2074}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075#endif
2076
Patrick McHardybc1d0412008-07-14 22:49:30 -07002077/*
2078 * netif_nit_deliver - deliver received packets to network taps
2079 * @skb: buffer
2080 *
2081 * This function is used to deliver incoming packets to network
2082 * taps. It should be used when the normal netif_receive_skb path
2083 * is bypassed, for example because of VLAN acceleration.
2084 */
2085void netif_nit_deliver(struct sk_buff *skb)
2086{
2087 struct packet_type *ptype;
2088
2089 if (list_empty(&ptype_all))
2090 return;
2091
2092 skb_reset_network_header(skb);
2093 skb_reset_transport_header(skb);
2094 skb->mac_len = skb->network_header - skb->mac_header;
2095
2096 rcu_read_lock();
2097 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2098 if (!ptype->dev || ptype->dev == skb->dev)
2099 deliver_skb(skb, ptype, skb->dev);
2100 }
2101 rcu_read_unlock();
2102}
2103
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002104/**
2105 * netif_receive_skb - process receive buffer from network
2106 * @skb: buffer to process
2107 *
2108 * netif_receive_skb() is the main receive data processing function.
2109 * It always succeeds. The buffer may be dropped during processing
2110 * for congestion control or by the protocol layers.
2111 *
2112 * This function may only be called from softirq context and interrupts
2113 * should be enabled.
2114 *
2115 * Return values (usually ignored):
2116 * NET_RX_SUCCESS: no congestion
2117 * NET_RX_DROP: packet was dropped
2118 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119int netif_receive_skb(struct sk_buff *skb)
2120{
2121 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002122 struct net_device *orig_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002124 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125
2126 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002127 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 return NET_RX_DROP;
2129
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002130 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002131 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
Patrick McHardyc01003c2007-03-29 11:46:52 -07002133 if (!skb->iif)
2134 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002135
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002136 orig_dev = skb_bond(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002138 if (!orig_dev)
2139 return NET_RX_DROP;
2140
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 __get_cpu_var(netdev_rx_stat).total++;
2142
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002143 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002144 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002145 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146
2147 pt_prev = NULL;
2148
2149 rcu_read_lock();
2150
Eric W. Biedermanb9f75f42008-06-20 22:16:51 -07002151 /* Don't receive packets in an exiting network namespace */
2152 if (!net_alive(dev_net(skb->dev)))
2153 goto out;
2154
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155#ifdef CONFIG_NET_CLS_ACT
2156 if (skb->tc_verd & TC_NCLS) {
2157 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2158 goto ncls;
2159 }
2160#endif
2161
2162 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2163 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002164 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002165 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 pt_prev = ptype;
2167 }
2168 }
2169
2170#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002171 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2172 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174ncls:
2175#endif
2176
Stephen Hemminger6229e362007-03-21 13:38:47 -07002177 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2178 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002180 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2181 if (!skb)
2182 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183
2184 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002185 list_for_each_entry_rcu(ptype,
2186 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 if (ptype->type == type &&
2188 (!ptype->dev || ptype->dev == skb->dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002189 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002190 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 pt_prev = ptype;
2192 }
2193 }
2194
2195 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002196 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 } else {
2198 kfree_skb(skb);
 2199 /* Jamal, now you will not be able to escape explaining
 2200 * to me how you were going to use this. :-)
2201 */
2202 ret = NET_RX_DROP;
2203 }
2204
2205out:
2206 rcu_read_unlock();
2207 return ret;
2208}
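/*
 * Illustrative sketch, not part of the original file: a NAPI ->poll()
 * handler for a hypothetical device, delivering frames straight to
 * netif_receive_skb() from softirq context. struct example_napi_priv
 * and example_fetch_rx() are invented; a real driver would pull
 * completed frames off its RX ring. Assumes <linux/etherdevice.h>.
 */
struct example_napi_priv {
	struct napi_struct napi;
	struct net_device *dev;
};

static struct sk_buff *example_fetch_rx(struct example_napi_priv *priv)
{
	/* A real driver returns the next completed frame, or NULL. */
	return NULL;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_napi_priv *priv =
		container_of(napi, struct example_napi_priv, napi);
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = example_fetch_rx(priv)) != NULL) {
		skb->protocol = eth_type_trans(skb, priv->dev);
		netif_receive_skb(skb);
		work++;
	}

	if (work < budget) {
		netif_rx_complete(priv->dev, napi);
		/* re-enable the device's RX interrupt here */
	}

	return work;
}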
2209
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002210static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211{
2212 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2214 unsigned long start_time = jiffies;
2215
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002216 napi->weight = weight_p;
2217 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 struct sk_buff *skb;
2219 struct net_device *dev;
2220
2221 local_irq_disable();
2222 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002223 if (!skb) {
2224 __napi_complete(napi);
2225 local_irq_enable();
2226 break;
2227 }
2228
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 local_irq_enable();
2230
2231 dev = skb->dev;
2232
2233 netif_receive_skb(skb);
2234
2235 dev_put(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002236 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002238 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239}
2240
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002241/**
2242 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002243 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002244 *
2245 * The entry's receive function will be scheduled to run
2246 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002247void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002248{
2249 unsigned long flags;
2250
2251 local_irq_save(flags);
2252 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2253 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2254 local_irq_restore(flags);
2255}
2256EXPORT_SYMBOL(__napi_schedule);
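/*
 * Illustrative sketch, not part of the original file: the interrupt
 * half that pairs with the example poll routine further above. The
 * device's RX interrupt is typically masked, then the NAPI instance is
 * scheduled; net_rx_action() later invokes ->poll(). napi_schedule()
 * performs the NAPI_STATE_SCHED test and ends up in __napi_schedule().
 * Assumes <linux/interrupt.h>; the example_* names are invented.
 */
static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_napi_priv *priv = dev_id;

	/* mask further RX interrupts on the device here */
	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}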
2257
2258
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259static void net_rx_action(struct softirq_action *h)
2260{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002261 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 unsigned long start_time = jiffies;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002263 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002264 void *have;
2265
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 local_irq_disable();
2267
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002268 while (!list_empty(list)) {
2269 struct napi_struct *n;
2270 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002272 /* If the softirq window is exhausted then punt.
2273 *
2274 * Note that this is a slight policy change from the
2275 * previous NAPI code, which would allow up to 2
2276 * jiffies to pass before breaking out. The test
2277 * used to be "jiffies - start_time > 1".
2278 */
2279 if (unlikely(budget <= 0 || jiffies != start_time))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 goto softnet_break;
2281
2282 local_irq_enable();
2283
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002284 /* Even though interrupts have been re-enabled, this
2285 * access is safe because interrupts can only add new
2286 * entries to the tail of this list, and only ->poll()
2287 * calls can remove this head entry from the list.
2288 */
2289 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002291 have = netpoll_poll_lock(n);
2292
2293 weight = n->weight;
2294
David S. Miller0a7606c2007-10-29 21:28:47 -07002295 /* This NAPI_STATE_SCHED test is for avoiding a race
2296 * with netpoll's poll_napi(). Only the entity which
2297 * obtains the lock and sees NAPI_STATE_SCHED set will
2298 * actually make the ->poll() call. Therefore we avoid
 2299 * accidentally calling ->poll() when NAPI is not scheduled.
2300 */
2301 work = 0;
2302 if (test_bit(NAPI_STATE_SCHED, &n->state))
2303 work = n->poll(n, weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002304
2305 WARN_ON_ONCE(work > weight);
2306
2307 budget -= work;
2308
2309 local_irq_disable();
2310
2311 /* Drivers must not modify the NAPI state if they
2312 * consume the entire weight. In such cases this code
2313 * still "owns" the NAPI instance and therefore can
2314 * move the instance around on the list at-will.
2315 */
David S. Millerfed17f32008-01-07 21:00:40 -08002316 if (unlikely(work == weight)) {
2317 if (unlikely(napi_disable_pending(n)))
2318 __napi_complete(n);
2319 else
2320 list_move_tail(&n->poll_list, list);
2321 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002322
2323 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 }
2325out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002326 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002327
Chris Leechdb217332006-06-17 21:24:58 -07002328#ifdef CONFIG_NET_DMA
2329 /*
2330 * There may not be any more sk_buffs coming right now, so push
2331 * any pending DMA copies to hardware
2332 */
Dan Williamsd379b012007-07-09 11:56:42 -07002333 if (!cpus_empty(net_dma.channel_mask)) {
2334 int chan_idx;
2335 for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
2336 struct dma_chan *chan = net_dma.channels[chan_idx];
2337 if (chan)
2338 dma_async_memcpy_issue_pending(chan);
2339 }
Chris Leechdb217332006-06-17 21:24:58 -07002340 }
2341#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002342
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 return;
2344
2345softnet_break:
2346 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2347 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2348 goto out;
2349}
2350
2351static gifconf_func_t * gifconf_list [NPROTO];
2352
2353/**
 2354 * register_gifconf - register a SIOCGIFCONF handler
2355 * @family: Address family
2356 * @gifconf: Function handler
2357 *
2358 * Register protocol dependent address dumping routines. The handler
2359 * that is passed must not be freed or reused until it has been replaced
2360 * by another handler.
2361 */
2362int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2363{
2364 if (family >= NPROTO)
2365 return -EINVAL;
2366 gifconf_list[family] = gifconf;
2367 return 0;
2368}
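/*
 * Illustrative sketch, not part of the original file: how an address
 * family hooks into SIOCGIFCONF. The real in-tree user is IPv4's
 * inet_gifconf() registered from devinet.c; the example_* names here
 * are purely for illustration.
 */
static int example_gifconf(struct net_device *dev, char __user *buf, int len)
{
	/*
	 * Write one struct ifreq per address configured on @dev into
	 * @buf (at most @len bytes), or, when @buf is NULL, report how
	 * much space would be needed. Return bytes used or -EFAULT.
	 */
	return 0;
}

static int __init example_gifconf_register(void)
{
	return register_gifconf(PF_INET, example_gifconf);
}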
2369
2370
2371/*
2372 * Map an interface index to its name (SIOCGIFNAME)
2373 */
2374
2375/*
2376 * We need this ioctl for efficient implementation of the
2377 * if_indextoname() function required by the IPv6 API. Without
2378 * it, we would have to search all the interfaces to find a
2379 * match. --pb
2380 */
2381
Eric W. Biederman881d9662007-09-17 11:56:21 -07002382static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383{
2384 struct net_device *dev;
2385 struct ifreq ifr;
2386
2387 /*
2388 * Fetch the caller's info block.
2389 */
2390
2391 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2392 return -EFAULT;
2393
2394 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002395 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 if (!dev) {
2397 read_unlock(&dev_base_lock);
2398 return -ENODEV;
2399 }
2400
2401 strcpy(ifr.ifr_name, dev->name);
2402 read_unlock(&dev_base_lock);
2403
2404 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2405 return -EFAULT;
2406 return 0;
2407}
2408
2409/*
2410 * Perform a SIOCGIFCONF call. This structure will change
2411 * size eventually, and there is nothing I can do about it.
2412 * Thus we will need a 'compatibility mode'.
2413 */
2414
Eric W. Biederman881d9662007-09-17 11:56:21 -07002415static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416{
2417 struct ifconf ifc;
2418 struct net_device *dev;
2419 char __user *pos;
2420 int len;
2421 int total;
2422 int i;
2423
2424 /*
2425 * Fetch the caller's info block.
2426 */
2427
2428 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2429 return -EFAULT;
2430
2431 pos = ifc.ifc_buf;
2432 len = ifc.ifc_len;
2433
2434 /*
2435 * Loop over the interfaces, and write an info block for each.
2436 */
2437
2438 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002439 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 for (i = 0; i < NPROTO; i++) {
2441 if (gifconf_list[i]) {
2442 int done;
2443 if (!pos)
2444 done = gifconf_list[i](dev, NULL, 0);
2445 else
2446 done = gifconf_list[i](dev, pos + total,
2447 len - total);
2448 if (done < 0)
2449 return -EFAULT;
2450 total += done;
2451 }
2452 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002453 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454
2455 /*
2456 * All done. Write the updated control block back to the caller.
2457 */
2458 ifc.ifc_len = total;
2459
2460 /*
2461 * Both BSD and Solaris return 0 here, so we do too.
2462 */
2463 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2464}
2465
2466#ifdef CONFIG_PROC_FS
2467/*
2468 * This is invoked by the /proc filesystem handler to display a device
2469 * in detail.
2470 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002472 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473{
Denis V. Luneve372c412007-11-19 22:31:54 -08002474 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002475 loff_t off;
2476 struct net_device *dev;
2477
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002479 if (!*pos)
2480 return SEQ_START_TOKEN;
2481
2482 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002483 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002484 if (off++ == *pos)
2485 return dev;
2486
2487 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488}
2489
2490void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2491{
Denis V. Luneve372c412007-11-19 22:31:54 -08002492 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002494 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002495 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496}
2497
2498void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002499 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500{
2501 read_unlock(&dev_base_lock);
2502}
2503
2504static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2505{
Rusty Russellc45d2862007-03-28 14:29:08 -07002506 struct net_device_stats *stats = dev->get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507
Rusty Russell5a1b5892007-04-28 21:04:03 -07002508 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2509 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2510 dev->name, stats->rx_bytes, stats->rx_packets,
2511 stats->rx_errors,
2512 stats->rx_dropped + stats->rx_missed_errors,
2513 stats->rx_fifo_errors,
2514 stats->rx_length_errors + stats->rx_over_errors +
2515 stats->rx_crc_errors + stats->rx_frame_errors,
2516 stats->rx_compressed, stats->multicast,
2517 stats->tx_bytes, stats->tx_packets,
2518 stats->tx_errors, stats->tx_dropped,
2519 stats->tx_fifo_errors, stats->collisions,
2520 stats->tx_carrier_errors +
2521 stats->tx_aborted_errors +
2522 stats->tx_window_errors +
2523 stats->tx_heartbeat_errors,
2524 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525}
2526
2527/*
2528 * Called from the PROCfs module. This now uses the new arbitrary sized
2529 * /proc/net interface to create /proc/net/dev
2530 */
2531static int dev_seq_show(struct seq_file *seq, void *v)
2532{
2533 if (v == SEQ_START_TOKEN)
2534 seq_puts(seq, "Inter-| Receive "
2535 " | Transmit\n"
2536 " face |bytes packets errs drop fifo frame "
2537 "compressed multicast|bytes packets errs "
2538 "drop fifo colls carrier compressed\n");
2539 else
2540 dev_seq_printf_stats(seq, v);
2541 return 0;
2542}
2543
2544static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2545{
2546 struct netif_rx_stats *rc = NULL;
2547
Mike Travis0c0b0ac2008-05-02 16:43:08 -07002548 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002549 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 rc = &per_cpu(netdev_rx_stat, *pos);
2551 break;
2552 } else
2553 ++*pos;
2554 return rc;
2555}
2556
2557static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2558{
2559 return softnet_get_online(pos);
2560}
2561
2562static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2563{
2564 ++*pos;
2565 return softnet_get_online(pos);
2566}
2567
2568static void softnet_seq_stop(struct seq_file *seq, void *v)
2569{
2570}
2571
2572static int softnet_seq_show(struct seq_file *seq, void *v)
2573{
2574 struct netif_rx_stats *s = v;
2575
2576 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07002577 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07002578 0, 0, 0, 0, /* was fastroute */
2579 s->cpu_collision );
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 return 0;
2581}
2582
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002583static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 .start = dev_seq_start,
2585 .next = dev_seq_next,
2586 .stop = dev_seq_stop,
2587 .show = dev_seq_show,
2588};
2589
2590static int dev_seq_open(struct inode *inode, struct file *file)
2591{
Denis V. Luneve372c412007-11-19 22:31:54 -08002592 return seq_open_net(inode, file, &dev_seq_ops,
2593 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594}
2595
Arjan van de Ven9a321442007-02-12 00:55:35 -08002596static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 .owner = THIS_MODULE,
2598 .open = dev_seq_open,
2599 .read = seq_read,
2600 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08002601 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602};
2603
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002604static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 .start = softnet_seq_start,
2606 .next = softnet_seq_next,
2607 .stop = softnet_seq_stop,
2608 .show = softnet_seq_show,
2609};
2610
2611static int softnet_seq_open(struct inode *inode, struct file *file)
2612{
2613 return seq_open(file, &softnet_seq_ops);
2614}
2615
Arjan van de Ven9a321442007-02-12 00:55:35 -08002616static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617 .owner = THIS_MODULE,
2618 .open = softnet_seq_open,
2619 .read = seq_read,
2620 .llseek = seq_lseek,
2621 .release = seq_release,
2622};
2623
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002624static void *ptype_get_idx(loff_t pos)
2625{
2626 struct packet_type *pt = NULL;
2627 loff_t i = 0;
2628 int t;
2629
2630 list_for_each_entry_rcu(pt, &ptype_all, list) {
2631 if (i == pos)
2632 return pt;
2633 ++i;
2634 }
2635
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002636 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002637 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2638 if (i == pos)
2639 return pt;
2640 ++i;
2641 }
2642 }
2643 return NULL;
2644}
2645
2646static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002647 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002648{
2649 rcu_read_lock();
2650 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2651}
2652
2653static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2654{
2655 struct packet_type *pt;
2656 struct list_head *nxt;
2657 int hash;
2658
2659 ++*pos;
2660 if (v == SEQ_START_TOKEN)
2661 return ptype_get_idx(0);
2662
2663 pt = v;
2664 nxt = pt->list.next;
2665 if (pt->type == htons(ETH_P_ALL)) {
2666 if (nxt != &ptype_all)
2667 goto found;
2668 hash = 0;
2669 nxt = ptype_base[0].next;
2670 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002671 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002672
2673 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002674 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002675 return NULL;
2676 nxt = ptype_base[hash].next;
2677 }
2678found:
2679 return list_entry(nxt, struct packet_type, list);
2680}
2681
2682static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002683 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002684{
2685 rcu_read_unlock();
2686}
2687
2688static void ptype_seq_decode(struct seq_file *seq, void *sym)
2689{
2690#ifdef CONFIG_KALLSYMS
2691 unsigned long offset = 0, symsize;
2692 const char *symname;
2693 char *modname;
2694 char namebuf[128];
2695
2696 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2697 &modname, namebuf);
2698
2699 if (symname) {
2700 char *delim = ":";
2701
2702 if (!modname)
2703 modname = delim = "";
2704 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2705 symname, offset);
2706 return;
2707 }
2708#endif
2709
2710 seq_printf(seq, "[%p]", sym);
2711}
2712
2713static int ptype_seq_show(struct seq_file *seq, void *v)
2714{
2715 struct packet_type *pt = v;
2716
2717 if (v == SEQ_START_TOKEN)
2718 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002719 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002720 if (pt->type == htons(ETH_P_ALL))
2721 seq_puts(seq, "ALL ");
2722 else
2723 seq_printf(seq, "%04x", ntohs(pt->type));
2724
2725 seq_printf(seq, " %-8s ",
2726 pt->dev ? pt->dev->name : "");
2727 ptype_seq_decode(seq, pt->func);
2728 seq_putc(seq, '\n');
2729 }
2730
2731 return 0;
2732}
2733
2734static const struct seq_operations ptype_seq_ops = {
2735 .start = ptype_seq_start,
2736 .next = ptype_seq_next,
2737 .stop = ptype_seq_stop,
2738 .show = ptype_seq_show,
2739};
2740
2741static int ptype_seq_open(struct inode *inode, struct file *file)
2742{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002743 return seq_open_net(inode, file, &ptype_seq_ops,
2744 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002745}
2746
2747static const struct file_operations ptype_seq_fops = {
2748 .owner = THIS_MODULE,
2749 .open = ptype_seq_open,
2750 .read = seq_read,
2751 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002752 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002753};
2754
2755
Pavel Emelyanov46650792007-10-08 20:38:39 -07002756static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757{
2758 int rc = -ENOMEM;
2759
Eric W. Biederman881d9662007-09-17 11:56:21 -07002760 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002762 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002764 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002765 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002766
Eric W. Biederman881d9662007-09-17 11:56:21 -07002767 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002768 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 rc = 0;
2770out:
2771 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002772out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002773 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002775 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002777 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 goto out;
2779}
Eric W. Biederman881d9662007-09-17 11:56:21 -07002780
Pavel Emelyanov46650792007-10-08 20:38:39 -07002781static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07002782{
2783 wext_proc_exit(net);
2784
2785 proc_net_remove(net, "ptype");
2786 proc_net_remove(net, "softnet_stat");
2787 proc_net_remove(net, "dev");
2788}
2789
Denis V. Lunev022cbae2007-11-13 03:23:50 -08002790static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07002791 .init = dev_proc_net_init,
2792 .exit = dev_proc_net_exit,
2793};
2794
2795static int __init dev_proc_init(void)
2796{
2797 return register_pernet_subsys(&dev_proc_ops);
2798}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799#else
2800#define dev_proc_init() 0
2801#endif /* CONFIG_PROC_FS */
2802
2803
2804/**
2805 * netdev_set_master - set up master/slave pair
2806 * @slave: slave device
2807 * @master: new master device
2808 *
2809 * Changes the master device of the slave. Pass %NULL to break the
2810 * bonding. The caller must hold the RTNL semaphore. On a failure
2811 * a negative errno code is returned. On success the reference counts
2812 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2813 * function returns zero.
2814 */
2815int netdev_set_master(struct net_device *slave, struct net_device *master)
2816{
2817 struct net_device *old = slave->master;
2818
2819 ASSERT_RTNL();
2820
2821 if (master) {
2822 if (old)
2823 return -EBUSY;
2824 dev_hold(master);
2825 }
2826
2827 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002828
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 synchronize_net();
2830
2831 if (old)
2832 dev_put(old);
2833
2834 if (master)
2835 slave->flags |= IFF_SLAVE;
2836 else
2837 slave->flags &= ~IFF_SLAVE;
2838
2839 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2840 return 0;
2841}
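/*
 * Illustrative sketch, not part of the original file: an enslave path
 * in the style of the bonding driver, pairing netdev_set_master() with
 * rollback on failure. Must run under RTNL, as required above;
 * example_enslave() is an invented name.
 */
static int example_enslave(struct net_device *master, struct net_device *slave)
{
	int err;

	ASSERT_RTNL();

	err = netdev_set_master(slave, master);
	if (err)
		return err;

	/* e.g. put the slave into promiscuous mode for the aggregate */
	err = dev_set_promiscuity(slave, 1);
	if (err < 0) {
		netdev_set_master(slave, NULL);
		return err;
	}

	return 0;
}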
2842
Wang Chendad9b332008-06-18 01:48:28 -07002843static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07002844{
2845 unsigned short old_flags = dev->flags;
2846
Patrick McHardy24023452007-07-14 18:51:31 -07002847 ASSERT_RTNL();
2848
Wang Chendad9b332008-06-18 01:48:28 -07002849 dev->flags |= IFF_PROMISC;
2850 dev->promiscuity += inc;
2851 if (dev->promiscuity == 0) {
2852 /*
2853 * Avoid overflow.
 2854	 * If inc causes overflow, leave promisc untouched and return an error.
2855 */
2856 if (inc < 0)
2857 dev->flags &= ~IFF_PROMISC;
2858 else {
2859 dev->promiscuity -= inc;
2860 printk(KERN_WARNING "%s: promiscuity touches roof, "
2861 "set promiscuity failed, promiscuity feature "
2862 "of device might be broken.\n", dev->name);
2863 return -EOVERFLOW;
2864 }
2865 }
Patrick McHardy4417da62007-06-27 01:28:10 -07002866 if (dev->flags != old_flags) {
2867 printk(KERN_INFO "device %s %s promiscuous mode\n",
2868 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2869 "left");
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05002870 if (audit_enabled)
2871 audit_log(current->audit_context, GFP_ATOMIC,
2872 AUDIT_ANOM_PROMISCUOUS,
2873 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2874 dev->name, (dev->flags & IFF_PROMISC),
2875 (old_flags & IFF_PROMISC),
2876 audit_get_loginuid(current),
2877 current->uid, current->gid,
2878 audit_get_sessionid(current));
Patrick McHardy24023452007-07-14 18:51:31 -07002879
2880 if (dev->change_rx_flags)
2881 dev->change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07002882 }
Wang Chendad9b332008-06-18 01:48:28 -07002883 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07002884}
2885
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886/**
2887 * dev_set_promiscuity - update promiscuity count on a device
2888 * @dev: device
2889 * @inc: modifier
2890 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07002891 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892 * remains above zero the interface remains promiscuous. Once it hits zero
 2893 * the device reverts to normal filtering operation. A negative @inc
2894 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07002895 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 */
Wang Chendad9b332008-06-18 01:48:28 -07002897int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898{
2899 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07002900 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901
Wang Chendad9b332008-06-18 01:48:28 -07002902 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07002903 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07002904 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07002905 if (dev->flags != old_flags)
2906 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07002907 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908}
2909
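/*
 * Illustrative sketch, not part of the original file: a packet-capture
 * style caller of dev_set_promiscuity(). Only the counting semantics and
 * the RTNL requirement documented above are assumed; the example
 * functions are hypothetical.
 */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take one promiscuity reference */
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference again */
	rtnl_unlock();
}
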
2910/**
2911 * dev_set_allmulti - update allmulti count on a device
2912 * @dev: device
2913 * @inc: modifier
2914 *
 2915 * Add or remove reception of all multicast frames on a device. While the
 2916 * count in the device remains above zero the interface remains listening
 2917 * to all multicast frames. Once it hits zero the device reverts to normal
2918 * filtering operation. A negative @inc value is used to drop the counter
2919 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07002920 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 */
2922
Wang Chendad9b332008-06-18 01:48:28 -07002923int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924{
2925 unsigned short old_flags = dev->flags;
2926
Patrick McHardy24023452007-07-14 18:51:31 -07002927 ASSERT_RTNL();
2928
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07002930 dev->allmulti += inc;
2931 if (dev->allmulti == 0) {
2932 /*
2933 * Avoid overflow.
 2934	 * If inc causes overflow, leave allmulti untouched and return an error.
2935 */
2936 if (inc < 0)
2937 dev->flags &= ~IFF_ALLMULTI;
2938 else {
2939 dev->allmulti -= inc;
2940 printk(KERN_WARNING "%s: allmulti touches roof, "
2941 "set allmulti failed, allmulti feature of "
2942 "device might be broken.\n", dev->name);
2943 return -EOVERFLOW;
2944 }
2945 }
Patrick McHardy24023452007-07-14 18:51:31 -07002946 if (dev->flags ^ old_flags) {
2947 if (dev->change_rx_flags)
2948 dev->change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07002949 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07002950 }
Wang Chendad9b332008-06-18 01:48:28 -07002951 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07002952}
2953
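/*
 * Illustrative sketch, not part of the original file: dev_set_allmulti()
 * follows the same reference-count pattern as dev_set_promiscuity(), so a
 * hypothetical multicast routing user would simply do:
 */
static int example_enable_allmulti(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_allmulti(dev, 1);	/* and dev_set_allmulti(dev, -1) on stop */
	rtnl_unlock();
	return err;
}
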
2954/*
2955 * Upload unicast and multicast address lists to device and
2956 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08002957 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07002958 * are present.
2959 */
2960void __dev_set_rx_mode(struct net_device *dev)
2961{
2962 /* dev_open will call this function so the list will stay sane. */
2963 if (!(dev->flags&IFF_UP))
2964 return;
2965
2966 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09002967 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07002968
2969 if (dev->set_rx_mode)
2970 dev->set_rx_mode(dev);
2971 else {
 2972		/* Unicast address changes may only happen under the rtnl,
2973 * therefore calling __dev_set_promiscuity here is safe.
2974 */
2975 if (dev->uc_count > 0 && !dev->uc_promisc) {
2976 __dev_set_promiscuity(dev, 1);
2977 dev->uc_promisc = 1;
2978 } else if (dev->uc_count == 0 && dev->uc_promisc) {
2979 __dev_set_promiscuity(dev, -1);
2980 dev->uc_promisc = 0;
2981 }
2982
2983 if (dev->set_multicast_list)
2984 dev->set_multicast_list(dev);
2985 }
2986}
2987
2988void dev_set_rx_mode(struct net_device *dev)
2989{
David S. Millerb9e40852008-07-15 00:15:08 -07002990 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07002991 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07002992 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993}
2994
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002995int __dev_addr_delete(struct dev_addr_list **list, int *count,
2996 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07002997{
2998 struct dev_addr_list *da;
2999
3000 for (; (da = *list) != NULL; list = &da->next) {
3001 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3002 alen == da->da_addrlen) {
3003 if (glbl) {
3004 int old_glbl = da->da_gusers;
3005 da->da_gusers = 0;
3006 if (old_glbl == 0)
3007 break;
3008 }
3009 if (--da->da_users)
3010 return 0;
3011
3012 *list = da->next;
3013 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003014 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003015 return 0;
3016 }
3017 }
3018 return -ENOENT;
3019}
3020
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003021int __dev_addr_add(struct dev_addr_list **list, int *count,
3022 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003023{
3024 struct dev_addr_list *da;
3025
3026 for (da = *list; da != NULL; da = da->next) {
3027 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3028 da->da_addrlen == alen) {
3029 if (glbl) {
3030 int old_glbl = da->da_gusers;
3031 da->da_gusers = 1;
3032 if (old_glbl)
3033 return 0;
3034 }
3035 da->da_users++;
3036 return 0;
3037 }
3038 }
3039
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003040 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003041 if (da == NULL)
3042 return -ENOMEM;
3043 memcpy(da->da_addr, addr, alen);
3044 da->da_addrlen = alen;
3045 da->da_users = 1;
3046 da->da_gusers = glbl ? 1 : 0;
3047 da->next = *list;
3048 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003049 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003050 return 0;
3051}
3052
Patrick McHardy4417da62007-06-27 01:28:10 -07003053/**
3054 * dev_unicast_delete - Release secondary unicast address.
3055 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003056 * @addr: address to delete
3057 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003058 *
3059 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003060 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003061 *
3062 * The caller must hold the rtnl_mutex.
3063 */
3064int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3065{
3066 int err;
3067
3068 ASSERT_RTNL();
3069
David S. Millerb9e40852008-07-15 00:15:08 -07003070 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003071 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3072 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003073 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003074 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003075 return err;
3076}
3077EXPORT_SYMBOL(dev_unicast_delete);
3078
3079/**
3080 * dev_unicast_add - add a secondary unicast address
3081 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003082 * @addr: address to add
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003083 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003084 *
3085 * Add a secondary unicast address to the device or increase
3086 * the reference count if it already exists.
3087 *
3088 * The caller must hold the rtnl_mutex.
3089 */
3090int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3091{
3092 int err;
3093
3094 ASSERT_RTNL();
3095
David S. Millerb9e40852008-07-15 00:15:08 -07003096 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003097 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3098 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003099 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003100 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003101 return err;
3102}
3103EXPORT_SYMBOL(dev_unicast_add);
3104
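/*
 * Illustrative sketch, not part of the original file: adding and removing
 * a secondary unicast (MAC) address via dev_unicast_add()/_delete(). The
 * address bytes and the example function are hypothetical; the rtnl_mutex
 * requirement comes from the kernel-doc above.
 */
static int example_secondary_unicast(struct net_device *dev)
{
	u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	int err;

	rtnl_lock();
	err = dev_unicast_add(dev, addr, ETH_ALEN);
	if (!err)
		err = dev_unicast_delete(dev, addr, ETH_ALEN);
	rtnl_unlock();
	return err;
}
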
Chris Leeche83a2ea2008-01-31 16:53:23 -08003105int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3106 struct dev_addr_list **from, int *from_count)
3107{
3108 struct dev_addr_list *da, *next;
3109 int err = 0;
3110
3111 da = *from;
3112 while (da != NULL) {
3113 next = da->next;
3114 if (!da->da_synced) {
3115 err = __dev_addr_add(to, to_count,
3116 da->da_addr, da->da_addrlen, 0);
3117 if (err < 0)
3118 break;
3119 da->da_synced = 1;
3120 da->da_users++;
3121 } else if (da->da_users == 1) {
3122 __dev_addr_delete(to, to_count,
3123 da->da_addr, da->da_addrlen, 0);
3124 __dev_addr_delete(from, from_count,
3125 da->da_addr, da->da_addrlen, 0);
3126 }
3127 da = next;
3128 }
3129 return err;
3130}
3131
3132void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3133 struct dev_addr_list **from, int *from_count)
3134{
3135 struct dev_addr_list *da, *next;
3136
3137 da = *from;
3138 while (da != NULL) {
3139 next = da->next;
3140 if (da->da_synced) {
3141 __dev_addr_delete(to, to_count,
3142 da->da_addr, da->da_addrlen, 0);
3143 da->da_synced = 0;
3144 __dev_addr_delete(from, from_count,
3145 da->da_addr, da->da_addrlen, 0);
3146 }
3147 da = next;
3148 }
3149}
3150
3151/**
3152 * dev_unicast_sync - Synchronize device's unicast list to another device
3153 * @to: destination device
3154 * @from: source device
3155 *
3156 * Add newly added addresses to the destination device and release
3157 * addresses that have no users left. The source device must be
 3158 *	locked by netif_addr_lock_bh.
3159 *
3160 * This function is intended to be called from the dev->set_rx_mode
3161 * function of layered software devices.
3162 */
3163int dev_unicast_sync(struct net_device *to, struct net_device *from)
3164{
3165 int err = 0;
3166
David S. Millerb9e40852008-07-15 00:15:08 -07003167 netif_addr_lock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003168 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3169 &from->uc_list, &from->uc_count);
3170 if (!err)
3171 __dev_set_rx_mode(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003172 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003173 return err;
3174}
3175EXPORT_SYMBOL(dev_unicast_sync);
3176
3177/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003178 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003179 * @to: destination device
3180 * @from: source device
3181 *
3182 * Remove all addresses that were added to the destination device by
3183 * dev_unicast_sync(). This function is intended to be called from the
3184 * dev->stop function of layered software devices.
3185 */
3186void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3187{
David S. Millerb9e40852008-07-15 00:15:08 -07003188 netif_addr_lock_bh(from);
David S. Millere308a5d2008-07-15 00:13:44 -07003189 netif_addr_lock(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003190
3191 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3192 &from->uc_list, &from->uc_count);
3193 __dev_set_rx_mode(to);
3194
David S. Millere308a5d2008-07-15 00:13:44 -07003195 netif_addr_unlock(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003196 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003197}
3198EXPORT_SYMBOL(dev_unicast_unsync);
3199
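/*
 * Illustrative sketch, not part of the original file: a layered driver
 * (VLAN- or bond-like) propagating its unicast list to the lower device
 * with dev_unicast_sync()/_unsync(), as the kernel-doc above suggests.
 * "upper" and "lower" are hypothetical devices held by such a driver.
 */
static void example_upper_set_rx_mode(struct net_device *upper,
				      struct net_device *lower)
{
	/* runs from upper's set_rx_mode, i.e. with upper's addr lock held */
	dev_unicast_sync(lower, upper);
}

static void example_upper_stop(struct net_device *upper,
			       struct net_device *lower)
{
	/* runs from upper's stop routine */
	dev_unicast_unsync(lower, upper);
}
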
Denis Cheng12972622007-07-18 02:12:56 -07003200static void __dev_addr_discard(struct dev_addr_list **list)
3201{
3202 struct dev_addr_list *tmp;
3203
3204 while (*list != NULL) {
3205 tmp = *list;
3206 *list = tmp->next;
3207 if (tmp->da_users > tmp->da_gusers)
3208 printk("__dev_addr_discard: address leakage! "
3209 "da_users=%d\n", tmp->da_users);
3210 kfree(tmp);
3211 }
3212}
3213
Denis Cheng26cc2522007-07-18 02:12:03 -07003214static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07003215{
David S. Millerb9e40852008-07-15 00:15:08 -07003216 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07003217
Patrick McHardy4417da62007-06-27 01:28:10 -07003218 __dev_addr_discard(&dev->uc_list);
3219 dev->uc_count = 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003220
Denis Cheng456ad752007-07-18 02:10:54 -07003221 __dev_addr_discard(&dev->mc_list);
3222 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07003223
David S. Millerb9e40852008-07-15 00:15:08 -07003224 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07003225}
3226
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227unsigned dev_get_flags(const struct net_device *dev)
3228{
3229 unsigned flags;
3230
3231 flags = (dev->flags & ~(IFF_PROMISC |
3232 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08003233 IFF_RUNNING |
3234 IFF_LOWER_UP |
3235 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236 (dev->gflags & (IFF_PROMISC |
3237 IFF_ALLMULTI));
3238
Stefan Rompfb00055a2006-03-20 17:09:11 -08003239 if (netif_running(dev)) {
3240 if (netif_oper_up(dev))
3241 flags |= IFF_RUNNING;
3242 if (netif_carrier_ok(dev))
3243 flags |= IFF_LOWER_UP;
3244 if (netif_dormant(dev))
3245 flags |= IFF_DORMANT;
3246 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247
3248 return flags;
3249}
3250
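/*
 * Illustrative sketch, not part of the original file: a reader of
 * dev_get_flags() separating administrative state (IFF_UP) from the
 * operational bits (IFF_RUNNING/IFF_LOWER_UP) that the function
 * synthesizes above. The example function is hypothetical.
 */
static void example_report_state(const struct net_device *dev)
{
	unsigned flags = dev_get_flags(dev);

	printk(KERN_DEBUG "%s: admin %s, carrier %s\n", dev->name,
	       (flags & IFF_UP) ? "up" : "down",
	       (flags & IFF_LOWER_UP) ? "ok" : "absent");
}
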
3251int dev_change_flags(struct net_device *dev, unsigned flags)
3252{
Thomas Graf7c355f52007-06-05 16:03:03 -07003253 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254 int old_flags = dev->flags;
3255
Patrick McHardy24023452007-07-14 18:51:31 -07003256 ASSERT_RTNL();
3257
Linus Torvalds1da177e2005-04-16 15:20:36 -07003258 /*
3259 * Set the flags on our device.
3260 */
3261
3262 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3263 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3264 IFF_AUTOMEDIA)) |
3265 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3266 IFF_ALLMULTI));
3267
3268 /*
3269 * Load in the correct multicast list now the flags have changed.
3270 */
3271
David Woodhouse0e917962008-05-20 14:36:14 -07003272 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
Patrick McHardy24023452007-07-14 18:51:31 -07003273 dev->change_rx_flags(dev, IFF_MULTICAST);
3274
Patrick McHardy4417da62007-06-27 01:28:10 -07003275 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276
3277 /*
3278 * Have we downed the interface. We handle IFF_UP ourselves
 3279	 *	Have we downed the interface? We handle IFF_UP ourselves
3280 * setting it.
3281 */
3282
3283 ret = 0;
3284 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3285 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3286
3287 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07003288 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289 }
3290
3291 if (dev->flags & IFF_UP &&
3292 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3293 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003294 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295
3296 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3297 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3298 dev->gflags ^= IFF_PROMISC;
3299 dev_set_promiscuity(dev, inc);
3300 }
3301
3302 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 3303	   is important. Some (broken) drivers set IFF_PROMISC themselves when
 3304	   IFF_ALLMULTI is requested, without asking us and without reporting it.
3305 */
3306 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3307 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3308 dev->gflags ^= IFF_ALLMULTI;
3309 dev_set_allmulti(dev, inc);
3310 }
3311
Thomas Graf7c355f52007-06-05 16:03:03 -07003312 /* Exclude state transition flags, already notified */
3313 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3314 if (changes)
3315 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316
3317 return ret;
3318}
3319
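/*
 * Illustrative sketch, not part of the original file: bringing an
 * interface administratively up by name through dev_change_flags(), the
 * same path SIOCSIFFLAGS takes above. The example function and the use
 * of init_net are hypothetical.
 */
static int example_bring_up(const char *name)
{
	struct net_device *dev;
	int err = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(&init_net, name);
	if (dev)
		err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}
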
3320int dev_set_mtu(struct net_device *dev, int new_mtu)
3321{
3322 int err;
3323
3324 if (new_mtu == dev->mtu)
3325 return 0;
3326
3327 /* MTU must be positive. */
3328 if (new_mtu < 0)
3329 return -EINVAL;
3330
3331 if (!netif_device_present(dev))
3332 return -ENODEV;
3333
3334 err = 0;
3335 if (dev->change_mtu)
3336 err = dev->change_mtu(dev, new_mtu);
3337 else
3338 dev->mtu = new_mtu;
3339 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003340 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341 return err;
3342}
3343
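/*
 * Illustrative sketch, not part of the original file: changing the MTU
 * through dev_set_mtu(), which either uses the driver's change_mtu hook
 * or writes dev->mtu directly as shown above. Holding rtnl_lock(), as
 * the SIOCSIFMTU path does, is assumed here.
 */
static int example_set_mtu(struct net_device *dev, int mtu)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, mtu);	/* notifies NETDEV_CHANGEMTU on success */
	rtnl_unlock();
	return err;
}
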
3344int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3345{
3346 int err;
3347
3348 if (!dev->set_mac_address)
3349 return -EOPNOTSUPP;
3350 if (sa->sa_family != dev->type)
3351 return -EINVAL;
3352 if (!netif_device_present(dev))
3353 return -ENODEV;
3354 err = dev->set_mac_address(dev, sa);
3355 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003356 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003357 return err;
3358}
3359
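/*
 * Illustrative sketch, not part of the original file: setting a hardware
 * address via dev_set_mac_address(). The sockaddr family must match
 * dev->type, as checked above; the address bytes here are hypothetical
 * locally administered values.
 */
static int example_set_mac(struct net_device *dev)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memset(sa.sa_data, 0, sizeof(sa.sa_data));
	memcpy(sa.sa_data, "\x02\x00\x00\x00\x00\x02", 6);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}
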
3360/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07003361 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07003363static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364{
3365 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003366 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367
3368 if (!dev)
3369 return -ENODEV;
3370
3371 switch (cmd) {
3372 case SIOCGIFFLAGS: /* Get interface flags */
3373 ifr->ifr_flags = dev_get_flags(dev);
3374 return 0;
3375
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 case SIOCGIFMETRIC: /* Get the metric on the interface
3377 (currently unused) */
3378 ifr->ifr_metric = 0;
3379 return 0;
3380
Linus Torvalds1da177e2005-04-16 15:20:36 -07003381 case SIOCGIFMTU: /* Get the MTU of a device */
3382 ifr->ifr_mtu = dev->mtu;
3383 return 0;
3384
Linus Torvalds1da177e2005-04-16 15:20:36 -07003385 case SIOCGIFHWADDR:
3386 if (!dev->addr_len)
3387 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3388 else
3389 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3390 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3391 ifr->ifr_hwaddr.sa_family = dev->type;
3392 return 0;
3393
Jeff Garzik14e3e072007-10-08 00:06:32 -07003394 case SIOCGIFSLAVE:
3395 err = -EINVAL;
3396 break;
3397
3398 case SIOCGIFMAP:
3399 ifr->ifr_map.mem_start = dev->mem_start;
3400 ifr->ifr_map.mem_end = dev->mem_end;
3401 ifr->ifr_map.base_addr = dev->base_addr;
3402 ifr->ifr_map.irq = dev->irq;
3403 ifr->ifr_map.dma = dev->dma;
3404 ifr->ifr_map.port = dev->if_port;
3405 return 0;
3406
3407 case SIOCGIFINDEX:
3408 ifr->ifr_ifindex = dev->ifindex;
3409 return 0;
3410
3411 case SIOCGIFTXQLEN:
3412 ifr->ifr_qlen = dev->tx_queue_len;
3413 return 0;
3414
3415 default:
3416 /* dev_ioctl() should ensure this case
3417 * is never reached
3418 */
3419 WARN_ON(1);
3420 err = -EINVAL;
3421 break;
3422
3423 }
3424 return err;
3425}
3426
3427/*
3428 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3429 */
3430static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3431{
3432 int err;
3433 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3434
3435 if (!dev)
3436 return -ENODEV;
3437
3438 switch (cmd) {
3439 case SIOCSIFFLAGS: /* Set interface flags */
3440 return dev_change_flags(dev, ifr->ifr_flags);
3441
3442 case SIOCSIFMETRIC: /* Set the metric on the interface
3443 (currently unused) */
3444 return -EOPNOTSUPP;
3445
3446 case SIOCSIFMTU: /* Set the MTU of a device */
3447 return dev_set_mtu(dev, ifr->ifr_mtu);
3448
Linus Torvalds1da177e2005-04-16 15:20:36 -07003449 case SIOCSIFHWADDR:
3450 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3451
3452 case SIOCSIFHWBROADCAST:
3453 if (ifr->ifr_hwaddr.sa_family != dev->type)
3454 return -EINVAL;
3455 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3456 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003457 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458 return 0;
3459
Linus Torvalds1da177e2005-04-16 15:20:36 -07003460 case SIOCSIFMAP:
3461 if (dev->set_config) {
3462 if (!netif_device_present(dev))
3463 return -ENODEV;
3464 return dev->set_config(dev, &ifr->ifr_map);
3465 }
3466 return -EOPNOTSUPP;
3467
3468 case SIOCADDMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003469 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3471 return -EINVAL;
3472 if (!netif_device_present(dev))
3473 return -ENODEV;
3474 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3475 dev->addr_len, 1);
3476
3477 case SIOCDELMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003478 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3480 return -EINVAL;
3481 if (!netif_device_present(dev))
3482 return -ENODEV;
3483 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3484 dev->addr_len, 1);
3485
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486 case SIOCSIFTXQLEN:
3487 if (ifr->ifr_qlen < 0)
3488 return -EINVAL;
3489 dev->tx_queue_len = ifr->ifr_qlen;
3490 return 0;
3491
3492 case SIOCSIFNAME:
3493 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3494 return dev_change_name(dev, ifr->ifr_newname);
3495
3496 /*
3497 * Unknown or private ioctl
3498 */
3499
3500 default:
3501 if ((cmd >= SIOCDEVPRIVATE &&
3502 cmd <= SIOCDEVPRIVATE + 15) ||
3503 cmd == SIOCBONDENSLAVE ||
3504 cmd == SIOCBONDRELEASE ||
3505 cmd == SIOCBONDSETHWADDR ||
3506 cmd == SIOCBONDSLAVEINFOQUERY ||
3507 cmd == SIOCBONDINFOQUERY ||
3508 cmd == SIOCBONDCHANGEACTIVE ||
3509 cmd == SIOCGMIIPHY ||
3510 cmd == SIOCGMIIREG ||
3511 cmd == SIOCSMIIREG ||
3512 cmd == SIOCBRADDIF ||
3513 cmd == SIOCBRDELIF ||
3514 cmd == SIOCWANDEV) {
3515 err = -EOPNOTSUPP;
3516 if (dev->do_ioctl) {
3517 if (netif_device_present(dev))
3518 err = dev->do_ioctl(dev, ifr,
3519 cmd);
3520 else
3521 err = -ENODEV;
3522 }
3523 } else
3524 err = -EINVAL;
3525
3526 }
3527 return err;
3528}
3529
3530/*
3531 * This function handles all "interface"-type I/O control requests. The actual
3532 * 'doing' part of this is dev_ifsioc above.
3533 */
3534
3535/**
3536 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003537 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 * @cmd: command to issue
3539 * @arg: pointer to a struct ifreq in user space
3540 *
3541 * Issue ioctl functions to devices. This is normally called by the
3542 * user space syscall interfaces but can sometimes be useful for
3543 * other purposes. The return value is the return from the syscall if
3544 * positive or a negative errno code on error.
3545 */
3546
Eric W. Biederman881d9662007-09-17 11:56:21 -07003547int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003548{
3549 struct ifreq ifr;
3550 int ret;
3551 char *colon;
3552
3553 /* One special case: SIOCGIFCONF takes ifconf argument
3554 and requires shared lock, because it sleeps writing
3555 to user space.
3556 */
3557
3558 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003559 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003560 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003561 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562 return ret;
3563 }
3564 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003565 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003566
3567 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3568 return -EFAULT;
3569
3570 ifr.ifr_name[IFNAMSIZ-1] = 0;
3571
3572 colon = strchr(ifr.ifr_name, ':');
3573 if (colon)
3574 *colon = 0;
3575
3576 /*
3577 * See which interface the caller is talking about.
3578 */
3579
3580 switch (cmd) {
3581 /*
3582 * These ioctl calls:
3583 * - can be done by all.
3584 * - atomic and do not require locking.
3585 * - return a value
3586 */
3587 case SIOCGIFFLAGS:
3588 case SIOCGIFMETRIC:
3589 case SIOCGIFMTU:
3590 case SIOCGIFHWADDR:
3591 case SIOCGIFSLAVE:
3592 case SIOCGIFMAP:
3593 case SIOCGIFINDEX:
3594 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003595 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596 read_lock(&dev_base_lock);
Jeff Garzik14e3e072007-10-08 00:06:32 -07003597 ret = dev_ifsioc_locked(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 read_unlock(&dev_base_lock);
3599 if (!ret) {
3600 if (colon)
3601 *colon = ':';
3602 if (copy_to_user(arg, &ifr,
3603 sizeof(struct ifreq)))
3604 ret = -EFAULT;
3605 }
3606 return ret;
3607
3608 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003609 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003611 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003612 rtnl_unlock();
3613 if (!ret) {
3614 if (colon)
3615 *colon = ':';
3616 if (copy_to_user(arg, &ifr,
3617 sizeof(struct ifreq)))
3618 ret = -EFAULT;
3619 }
3620 return ret;
3621
3622 /*
3623 * These ioctl calls:
3624 * - require superuser power.
3625 * - require strict serialization.
3626 * - return a value
3627 */
3628 case SIOCGMIIPHY:
3629 case SIOCGMIIREG:
3630 case SIOCSIFNAME:
3631 if (!capable(CAP_NET_ADMIN))
3632 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003633 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003635 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636 rtnl_unlock();
3637 if (!ret) {
3638 if (colon)
3639 *colon = ':';
3640 if (copy_to_user(arg, &ifr,
3641 sizeof(struct ifreq)))
3642 ret = -EFAULT;
3643 }
3644 return ret;
3645
3646 /*
3647 * These ioctl calls:
3648 * - require superuser power.
3649 * - require strict serialization.
3650 * - do not return a value
3651 */
3652 case SIOCSIFFLAGS:
3653 case SIOCSIFMETRIC:
3654 case SIOCSIFMTU:
3655 case SIOCSIFMAP:
3656 case SIOCSIFHWADDR:
3657 case SIOCSIFSLAVE:
3658 case SIOCADDMULTI:
3659 case SIOCDELMULTI:
3660 case SIOCSIFHWBROADCAST:
3661 case SIOCSIFTXQLEN:
3662 case SIOCSMIIREG:
3663 case SIOCBONDENSLAVE:
3664 case SIOCBONDRELEASE:
3665 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003666 case SIOCBONDCHANGEACTIVE:
3667 case SIOCBRADDIF:
3668 case SIOCBRDELIF:
3669 if (!capable(CAP_NET_ADMIN))
3670 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08003671 /* fall through */
3672 case SIOCBONDSLAVEINFOQUERY:
3673 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003674 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003675 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003676 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677 rtnl_unlock();
3678 return ret;
3679
3680 case SIOCGIFMEM:
3681 /* Get the per device memory space. We can add this but
3682 * currently do not support it */
3683 case SIOCSIFMEM:
3684 /* Set the per device memory buffer space.
3685 * Not applicable in our case */
3686 case SIOCSIFLINK:
3687 return -EINVAL;
3688
3689 /*
3690 * Unknown or private ioctl.
3691 */
3692 default:
3693 if (cmd == SIOCWANDEV ||
3694 (cmd >= SIOCDEVPRIVATE &&
3695 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003696 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003698 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 rtnl_unlock();
3700 if (!ret && copy_to_user(arg, &ifr,
3701 sizeof(struct ifreq)))
3702 ret = -EFAULT;
3703 return ret;
3704 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07003706 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003707 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708 return -EINVAL;
3709 }
3710}
3711
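/*
 * Illustrative sketch, not part of the original file: the user-space view
 * of the ioctl path implemented above. A hypothetical program querying the
 * MTU of "eth0" reaches dev_ifsioc_locked() through SIOCGIFMTU roughly
 * like this (user-space code, reproduced here only as documentation):
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu %d\n", ifr.ifr_mtu);
 *	close(fd);
 */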
3712
3713/**
3714 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003715 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716 *
3717 * Returns a suitable unique value for a new device interface
3718 * number. The caller must hold the rtnl semaphore or the
3719 * dev_base_lock to be sure it remains unique.
3720 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003721static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722{
3723 static int ifindex;
3724 for (;;) {
3725 if (++ifindex <= 0)
3726 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003727 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003728 return ifindex;
3729 }
3730}
3731
Linus Torvalds1da177e2005-04-16 15:20:36 -07003732/* Delayed registration/unregisteration */
3733static DEFINE_SPINLOCK(net_todo_list_lock);
Denis Cheng3b5b34f2007-12-07 00:49:17 -08003734static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735
Stephen Hemminger6f05f622007-03-08 20:46:03 -08003736static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737{
3738 spin_lock(&net_todo_list_lock);
3739 list_add_tail(&dev->todo_list, &net_todo_list);
3740 spin_unlock(&net_todo_list_lock);
3741}
3742
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003743static void rollback_registered(struct net_device *dev)
3744{
3745 BUG_ON(dev_boot_phase);
3746 ASSERT_RTNL();
3747
 3748	/* Some devices call this without ever having registered, to unwind a failed initialization. */
3749 if (dev->reg_state == NETREG_UNINITIALIZED) {
3750 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3751 "was registered\n", dev->name, dev);
3752
3753 WARN_ON(1);
3754 return;
3755 }
3756
3757 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3758
3759 /* If device is running, close it first. */
3760 dev_close(dev);
3761
3762 /* And unlink it from device chain. */
3763 unlist_netdevice(dev);
3764
3765 dev->reg_state = NETREG_UNREGISTERING;
3766
3767 synchronize_net();
3768
3769 /* Shutdown queueing discipline. */
3770 dev_shutdown(dev);
3771
3772
 3773	/* Notify protocols that we are about to destroy
 3774	   this device. They should clean up all their state.
3775 */
3776 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3777
3778 /*
3779 * Flush the unicast and multicast chains
3780 */
3781 dev_addr_discard(dev);
3782
3783 if (dev->uninit)
3784 dev->uninit(dev);
3785
3786 /* Notifier chain MUST detach us from master device. */
3787 BUG_TRAP(!dev->master);
3788
3789 /* Remove entries from kobject tree */
3790 netdev_unregister_kobject(dev);
3791
3792 synchronize_net();
3793
3794 dev_put(dev);
3795}
3796
David S. Millere8a04642008-07-17 00:34:19 -07003797static void __netdev_init_queue_locks_one(struct net_device *dev,
3798 struct netdev_queue *dev_queue,
3799 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07003800{
3801 spin_lock_init(&dev_queue->_xmit_lock);
3802 netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
3803 dev_queue->xmit_lock_owner = -1;
3804}
3805
3806static void netdev_init_queue_locks(struct net_device *dev)
3807{
David S. Millere8a04642008-07-17 00:34:19 -07003808 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3809 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07003810}
3811
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812/**
3813 * register_netdevice - register a network device
3814 * @dev: device to register
3815 *
3816 * Take a completed network device structure and add it to the kernel
3817 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3818 * chain. 0 is returned on success. A negative errno code is returned
3819 * on a failure to set up the device, or if the name is a duplicate.
3820 *
3821 * Callers must hold the rtnl semaphore. You may want
3822 * register_netdev() instead of this.
3823 *
3824 * BUGS:
3825 * The locking appears insufficient to guarantee two parallel registers
3826 * will not get the same name.
3827 */
3828
3829int register_netdevice(struct net_device *dev)
3830{
3831 struct hlist_head *head;
3832 struct hlist_node *p;
3833 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003834 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835
3836 BUG_ON(dev_boot_phase);
3837 ASSERT_RTNL();
3838
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003839 might_sleep();
3840
Linus Torvalds1da177e2005-04-16 15:20:36 -07003841 /* When net_device's are persistent, this will be fatal. */
3842 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003843 BUG_ON(!dev_net(dev));
3844 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845
David S. Millerf1f28aa2008-07-15 00:08:33 -07003846 spin_lock_init(&dev->addr_list_lock);
David S. Millerc773e842008-07-08 23:13:53 -07003847 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849 dev->iflink = -1;
3850
3851 /* Init, if this function is available */
3852 if (dev->init) {
3853 ret = dev->init(dev);
3854 if (ret) {
3855 if (ret > 0)
3856 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08003857 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858 }
3859 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003860
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861 if (!dev_valid_name(dev->name)) {
3862 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003863 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003864 }
3865
Eric W. Biederman881d9662007-09-17 11:56:21 -07003866 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867 if (dev->iflink == -1)
3868 dev->iflink = dev->ifindex;
3869
3870 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003871 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003872 hlist_for_each(p, head) {
3873 struct net_device *d
3874 = hlist_entry(p, struct net_device, name_hlist);
3875 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3876 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003877 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003879 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880
Stephen Hemmingerd212f872007-06-27 00:47:37 -07003881 /* Fix illegal checksum combinations */
3882 if ((dev->features & NETIF_F_HW_CSUM) &&
3883 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3884 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3885 dev->name);
3886 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3887 }
3888
3889 if ((dev->features & NETIF_F_NO_CSUM) &&
3890 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3891 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3892 dev->name);
3893 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3894 }
3895
3896
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897 /* Fix illegal SG+CSUM combinations. */
3898 if ((dev->features & NETIF_F_SG) &&
Herbert Xu8648b302006-06-17 22:06:05 -07003899 !(dev->features & NETIF_F_ALL_CSUM)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003900 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003901 dev->name);
3902 dev->features &= ~NETIF_F_SG;
3903 }
3904
3905 /* TSO requires that SG is present as well. */
3906 if ((dev->features & NETIF_F_TSO) &&
3907 !(dev->features & NETIF_F_SG)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003908 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003909 dev->name);
3910 dev->features &= ~NETIF_F_TSO;
3911 }
Ananda Rajue89e9cf2005-10-18 15:46:41 -07003912 if (dev->features & NETIF_F_UFO) {
3913 if (!(dev->features & NETIF_F_HW_CSUM)) {
3914 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3915 "NETIF_F_HW_CSUM feature.\n",
3916 dev->name);
3917 dev->features &= ~NETIF_F_UFO;
3918 }
3919 if (!(dev->features & NETIF_F_SG)) {
3920 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3921 "NETIF_F_SG feature.\n",
3922 dev->name);
3923 dev->features &= ~NETIF_F_UFO;
3924 }
3925 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003926
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07003927 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07003928 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003929 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003930 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003931 dev->reg_state = NETREG_REGISTERED;
3932
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 /*
3934 * Default initial state at registry is that the
3935 * device is present.
3936 */
3937
3938 set_bit(__LINK_STATE_PRESENT, &dev->state);
3939
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02003942 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003943
 3944	/* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003945 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07003946 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003947 if (ret) {
3948 rollback_registered(dev);
3949 dev->reg_state = NETREG_UNREGISTERED;
3950 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951
3952out:
3953 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003954
3955err_uninit:
3956 if (dev->uninit)
3957 dev->uninit(dev);
3958 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003959}
3960
3961/**
3962 * register_netdev - register a network device
3963 * @dev: device to register
3964 *
3965 * Take a completed network device structure and add it to the kernel
3966 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3967 * chain. 0 is returned on success. A negative errno code is returned
3968 * on a failure to set up the device, or if the name is a duplicate.
3969 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07003970 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971 * and expands the device name if you passed a format string to
3972 * alloc_netdev.
3973 */
3974int register_netdev(struct net_device *dev)
3975{
3976 int err;
3977
3978 rtnl_lock();
3979
3980 /*
3981 * If the name is a format string the caller wants us to do a
3982 * name allocation.
3983 */
3984 if (strchr(dev->name, '%')) {
3985 err = dev_alloc_name(dev, dev->name);
3986 if (err < 0)
3987 goto out;
3988 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003989
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990 err = register_netdevice(dev);
3991out:
3992 rtnl_unlock();
3993 return err;
3994}
3995EXPORT_SYMBOL(register_netdev);
3996
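/*
 * Illustrative sketch, not part of the original file: the usual driver
 * registration sequence built on alloc_netdev()/register_netdev(). The
 * private struct, the setup callback and the "exmpl%d" name are
 * hypothetical; only the API calls themselves are taken from this file
 * and <linux/netdevice.h>.
 */
struct example_priv {
	int dummy;
};

static void example_setup(struct net_device *dev)
{
	/* a real driver would fill in MTU, header handling, handlers, ... here */
	dev->mtu = 1500;
}

static struct net_device *example_probe(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct example_priv), "exmpl%d",
			   example_setup);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {	/* takes rtnl_lock() internally */
		free_netdev(dev);	/* usual error path: release the allocation */
		return NULL;
	}
	return dev;
}
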
3997/*
3998 * netdev_wait_allrefs - wait until all references are gone.
3999 *
4000 * This is called when unregistering network devices.
4001 *
4002 * Any protocol or device that holds a reference should register
4003 * for netdevice notification, and cleanup and put back the
4004 * reference if they receive an UNREGISTER event.
4005 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004006 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 */
4008static void netdev_wait_allrefs(struct net_device *dev)
4009{
4010 unsigned long rebroadcast_time, warning_time;
4011
4012 rebroadcast_time = warning_time = jiffies;
4013 while (atomic_read(&dev->refcnt) != 0) {
4014 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004015 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004016
4017 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004018 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019
4020 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4021 &dev->state)) {
4022 /* We must not have linkwatch events
4023 * pending on unregister. If this
4024 * happens, we simply run the queue
4025 * unscheduled, resulting in a noop
4026 * for this device.
4027 */
4028 linkwatch_run_queue();
4029 }
4030
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004031 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032
4033 rebroadcast_time = jiffies;
4034 }
4035
4036 msleep(250);
4037
4038 if (time_after(jiffies, warning_time + 10 * HZ)) {
4039 printk(KERN_EMERG "unregister_netdevice: "
4040 "waiting for %s to become free. Usage "
4041 "count = %d\n",
4042 dev->name, atomic_read(&dev->refcnt));
4043 warning_time = jiffies;
4044 }
4045 }
4046}
4047
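
/*
 * Illustrative sketch, not part of the original file: the behaviour
 * netdev_wait_allrefs() relies on from reference holders. A hypothetical
 * subsystem that pins a net_device registers a notifier (via
 * register_netdevice_notifier()) and drops its reference on
 * NETDEV_UNREGISTER so the wait above can complete.
 */
static struct net_device *example_pinned_dev;

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER && dev == example_pinned_dev) {
		dev_put(example_pinned_dev);	/* give the reference back */
		example_pinned_dev = NULL;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};
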
4048/* The sequence is:
4049 *
4050 * rtnl_lock();
4051 * ...
4052 * register_netdevice(x1);
4053 * register_netdevice(x2);
4054 * ...
4055 * unregister_netdevice(y1);
4056 * unregister_netdevice(y2);
4057 * ...
4058 * rtnl_unlock();
4059 * free_netdev(y1);
4060 * free_netdev(y2);
4061 *
4062 * We are invoked by rtnl_unlock() after it drops the semaphore.
4063 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004064 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065 * without deadlocking with linkwatch via keventd.
4066 * 2) Since we run with the RTNL semaphore not held, we can sleep
4067 * safely in order to wait for the netdev refcnt to drop to zero.
4068 */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004069static DEFINE_MUTEX(net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070void netdev_run_todo(void)
4071{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004072 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073
4074 /* Need to guard against multiple cpu's getting out of order. */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004075 mutex_lock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076
4077 /* Not safe to do outside the semaphore. We must not return
4078 * until all unregister events invoked by the local processor
4079 * have been completed (either by this todo run, or one on
4080 * another cpu).
4081 */
4082 if (list_empty(&net_todo_list))
4083 goto out;
4084
4085 /* Snapshot list, allow later requests */
4086 spin_lock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004087 list_replace_init(&net_todo_list, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088 spin_unlock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004089
Linus Torvalds1da177e2005-04-16 15:20:36 -07004090 while (!list_empty(&list)) {
4091 struct net_device *dev
4092 = list_entry(list.next, struct net_device, todo_list);
4093 list_del(&dev->todo_list);
4094
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004095 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096 printk(KERN_ERR "network todo '%s' but state %d\n",
4097 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004098 dump_stack();
4099 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004101
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004102 dev->reg_state = NETREG_UNREGISTERED;
4103
4104 netdev_wait_allrefs(dev);
4105
4106 /* paranoia */
4107 BUG_ON(atomic_read(&dev->refcnt));
4108 BUG_TRAP(!dev->ip_ptr);
4109 BUG_TRAP(!dev->ip6_ptr);
4110 BUG_TRAP(!dev->dn_ptr);
4111
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004112 if (dev->destructor)
4113 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07004114
4115 /* Free network device */
4116 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004117 }
4118
4119out:
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004120 mutex_unlock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004121}
4122
Rusty Russell5a1b5892007-04-28 21:04:03 -07004123static struct net_device_stats *internal_stats(struct net_device *dev)
Rusty Russellc45d2862007-03-28 14:29:08 -07004124{
Rusty Russell5a1b5892007-04-28 21:04:03 -07004125 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07004126}
4127
David S. Millerdc2b4842008-07-08 17:18:23 -07004128static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07004129 struct netdev_queue *queue,
4130 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07004131{
4132 spin_lock_init(&queue->lock);
4133 queue->dev = dev;
4134}
4135
David S. Millerbb949fb2008-07-08 16:55:56 -07004136static void netdev_init_queues(struct net_device *dev)
4137{
David S. Millere8a04642008-07-17 00:34:19 -07004138 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4139 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerbb949fb2008-07-08 16:55:56 -07004140}
4141
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004143 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144 * @sizeof_priv: size of private data to allocate space for
4145 * @name: device name format string
4146 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004147 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07004148 *
4149 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004150 * and performs basic initialization. Also allocates subqueue structs
4151 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004152 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004153struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4154 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004155{
David S. Millere8a04642008-07-17 00:34:19 -07004156 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157 struct net_device *dev;
4158 int alloc_size;
David S. Millere8a04642008-07-17 00:34:19 -07004159 void *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004160
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004161 BUG_ON(strlen(name) >= sizeof(dev->name));
4162
Alexey Dobriyand1643d22008-04-18 15:43:32 -07004163 alloc_size = sizeof(struct net_device) +
4164 sizeof(struct net_device_subqueue) * (queue_count - 1);
4165 if (sizeof_priv) {
4166 /* ensure 32-byte alignment of private area */
4167 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4168 alloc_size += sizeof_priv;
4169 }
4170 /* ensure 32-byte alignment of whole construct */
4171 alloc_size += NETDEV_ALIGN_CONST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004172
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07004173 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004175 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176 return NULL;
4177 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178
David S. Millere8a04642008-07-17 00:34:19 -07004179 tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
4180 if (!tx) {
4181 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4182 "tx qdiscs.\n");
4183 kfree(p);
4184 return NULL;
4185 }
4186
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187 dev = (struct net_device *)
4188 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4189 dev->padded = (char *)dev - (char *)p;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004190 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004191
David S. Millere8a04642008-07-17 00:34:19 -07004192 dev->_tx = tx;
4193 dev->num_tx_queues = queue_count;
4194
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004195 if (sizeof_priv) {
4196 dev->priv = ((char *)dev +
4197 ((sizeof(struct net_device) +
4198 (sizeof(struct net_device_subqueue) *
Patrick McHardy31ce72a2007-07-20 19:45:45 -07004199 (queue_count - 1)) + NETDEV_ALIGN_CONST)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004200 & ~NETDEV_ALIGN_CONST));
4201 }
4202
4203 dev->egress_subqueue_count = queue_count;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07004204 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205
David S. Millerbb949fb2008-07-08 16:55:56 -07004206 netdev_init_queues(dev);
4207
Rusty Russell5a1b5892007-04-28 21:04:03 -07004208 dev->get_stats = internal_stats;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004209 netpoll_netdev_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210 setup(dev);
4211 strcpy(dev->name, name);
4212 return dev;
4213}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004214EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215
4216/**
4217 * free_netdev - free network device
4218 * @dev: device
4219 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004220 * This function does the last stage of destroying an allocated device
4221 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222 * If this is the last reference then it will be freed.
4223 */
4224void free_netdev(struct net_device *dev)
4225{
Denis V. Lunevf3005d72008-04-16 02:02:18 -07004226 release_net(dev_net(dev));
4227
David S. Millere8a04642008-07-17 00:34:19 -07004228 kfree(dev->_tx);
4229
Stephen Hemminger3041a062006-05-26 13:25:24 -07004230 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231 if (dev->reg_state == NETREG_UNINITIALIZED) {
4232 kfree((char *)dev - dev->padded);
4233 return;
4234 }
4235
4236 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4237 dev->reg_state = NETREG_RELEASED;
4238
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07004239 /* will free via device release */
4240 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004242
Linus Torvalds1da177e2005-04-16 15:20:36 -07004243/* Synchronize with packet receive processing. */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004244void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245{
4246 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07004247 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248}
4249
4250/**
4251 * unregister_netdevice - remove device from the kernel
4252 * @dev: device
4253 *
4254 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004255 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256 *
4257 * Callers must hold the rtnl semaphore. You may want
4258 * unregister_netdev() instead of this.
4259 */
4260
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08004261void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004262{
Herbert Xua6620712007-12-12 19:21:56 -08004263 ASSERT_RTNL();
4264
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004265 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266 /* Finish processing unregister after unlock */
4267 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268}
4269
4270/**
4271 * unregister_netdev - remove device from the kernel
4272 * @dev: device
4273 *
4274 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004275 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276 *
4277 * This is just a wrapper for unregister_netdevice that takes
4278 * the rtnl semaphore. In general you want to use this and not
4279 * unregister_netdevice.
4280 */
4281void unregister_netdev(struct net_device *dev)
4282{
4283 rtnl_lock();
4284 unregister_netdevice(dev);
4285 rtnl_unlock();
4286}
4287
4288EXPORT_SYMBOL(unregister_netdev);
4289
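/*
 * Illustrative sketch, not part of the original file: the matching
 * teardown for a driver that registered with register_netdev(). By the
 * time unregister_netdev() returns, the todo work has waited for all
 * outstanding references, so free_netdev() may release the memory.
 */
static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes rtnl_lock() and runs the todo list */
	free_netdev(dev);	/* final release of the net_device */
}
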
Eric W. Biedermance286d32007-09-12 13:53:49 +02004290/**
4291 * dev_change_net_namespace - move device to different nethost namespace
4292 * @dev: device
4293 * @net: network namespace
4294 * @pat: If not NULL name pattern to try if the current device name
4295 * is already taken in the destination network namespace.
4296 *
4297 * This function shuts down a device interface and moves it
4298 * to a new network namespace. On success 0 is returned, on
 4299 * a failure a negative errno code is returned.
4300 *
4301 * Callers must hold the rtnl semaphore.
4302 */
4303
4304int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4305{
4306 char buf[IFNAMSIZ];
4307 const char *destname;
4308 int err;
4309
4310 ASSERT_RTNL();
4311
4312 /* Don't allow namespace local devices to be moved. */
4313 err = -EINVAL;
4314 if (dev->features & NETIF_F_NETNS_LOCAL)
4315 goto out;
4316
 4317	/* Ensure the device has been registered */
4318 err = -EINVAL;
4319 if (dev->reg_state != NETREG_REGISTERED)
4320 goto out;
4321
 4322	/* Get out if there is nothing to do */
4323 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09004324 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02004325 goto out;
4326
4327 /* Pick the destination device name, and ensure
4328 * we can use it in the destination network namespace.
4329 */
4330 err = -EEXIST;
4331 destname = dev->name;
4332 if (__dev_get_by_name(net, destname)) {
4333 /* We get here if we can't use the current device name */
4334 if (!pat)
4335 goto out;
4336 if (!dev_valid_name(pat))
4337 goto out;
4338 if (strchr(pat, '%')) {
4339 if (__dev_alloc_name(net, pat, buf) < 0)
4340 goto out;
4341 destname = buf;
4342 } else
4343 destname = pat;
4344 if (__dev_get_by_name(net, destname))
4345 goto out;
4346 }
4347
4348 /*
4349	 * And now a mini version of register_netdevice and unregister_netdevice.
4350 */
4351
4352 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07004353 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004354
4355 /* And unlink it from device chain */
4356 err = -ENODEV;
4357 unlist_netdevice(dev);
4358
4359 synchronize_net();
4360
4361 /* Shutdown queueing discipline. */
4362 dev_shutdown(dev);
4363
4364	/* Notify protocols that we are about to destroy
4365	   this device. They should clean up all of their state.
4366 */
4367 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4368
4369 /*
4370 * Flush the unicast and multicast chains
4371 */
4372 dev_addr_discard(dev);
4373
4374 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004375 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004376
4377 /* Assign the new device name */
4378 if (destname != dev->name)
4379 strcpy(dev->name, destname);
4380
4381 /* If there is an ifindex conflict assign a new one */
4382 if (__dev_get_by_index(net, dev->ifindex)) {
4383 int iflink = (dev->iflink == dev->ifindex);
4384 dev->ifindex = dev_new_index(net);
4385 if (iflink)
4386 dev->iflink = dev->ifindex;
4387 }
4388
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004389 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004390 netdev_unregister_kobject(dev);
4391 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004392 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004393
4394 /* Add the device back in the hashes */
4395 list_netdevice(dev);
4396
4397	/* Notify protocols that a new device appeared. */
4398 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4399
4400 synchronize_net();
4401 err = 0;
4402out:
4403 return err;
4404}
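/*
 * Usage sketch (illustrative only, not part of this file): move a device
 * into another namespace, falling back to an "eth%d"-style pattern if its
 * current name is already taken there; a negative errno such as -EEXIST
 * or -EINVAL comes back on failure.  "dev" and "other_net" are
 * hypothetical, and the caller is assumed to hold the rtnl lock.
 *
 *	ASSERT_RTNL();
 *	err = dev_change_net_namespace(dev, other_net, "eth%d");
 *	if (err)
 *		return err;
 */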
4405
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406static int dev_cpu_callback(struct notifier_block *nfb,
4407 unsigned long action,
4408 void *ocpu)
4409{
4410 struct sk_buff **list_skb;
David S. Milleree609cb2008-07-08 22:58:37 -07004411 struct netdev_queue **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412 struct sk_buff *skb;
4413 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4414 struct softnet_data *sd, *oldsd;
4415
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07004416 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004417 return NOTIFY_OK;
4418
4419 local_irq_disable();
4420 cpu = smp_processor_id();
4421 sd = &per_cpu(softnet_data, cpu);
4422 oldsd = &per_cpu(softnet_data, oldcpu);
4423
4424 /* Find end of our completion_queue. */
4425 list_skb = &sd->completion_queue;
4426 while (*list_skb)
4427 list_skb = &(*list_skb)->next;
4428 /* Append completion queue from offline CPU. */
4429 *list_skb = oldsd->completion_queue;
4430 oldsd->completion_queue = NULL;
4431
4432 /* Find end of our output_queue. */
4433 list_net = &sd->output_queue;
4434 while (*list_net)
4435 list_net = &(*list_net)->next_sched;
4436 /* Append output queue from offline CPU. */
4437 *list_net = oldsd->output_queue;
4438 oldsd->output_queue = NULL;
4439
4440 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4441 local_irq_enable();
4442
4443 /* Process offline CPU's input_pkt_queue */
4444 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4445 netif_rx(skb);
4446
4447 return NOTIFY_OK;
4448}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449
Chris Leechdb217332006-06-17 21:24:58 -07004450#ifdef CONFIG_NET_DMA
4451/**
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004452 * net_dma_rebalance - try to maintain one DMA channel per CPU
4453 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4454 *
4455 * This is called when the number of channels allocated to the net_dma client
4456 * changes. The net_dma client tries to have one DMA channel per CPU.
Chris Leechdb217332006-06-17 21:24:58 -07004457 */
Dan Williamsd379b012007-07-09 11:56:42 -07004458
4459static void net_dma_rebalance(struct net_dma *net_dma)
Chris Leechdb217332006-06-17 21:24:58 -07004460{
Dan Williamsd379b012007-07-09 11:56:42 -07004461 unsigned int cpu, i, n, chan_idx;
Chris Leechdb217332006-06-17 21:24:58 -07004462 struct dma_chan *chan;
4463
Dan Williamsd379b012007-07-09 11:56:42 -07004464 if (cpus_empty(net_dma->channel_mask)) {
Chris Leechdb217332006-06-17 21:24:58 -07004465 for_each_online_cpu(cpu)
Alexey Dobriyan29bbd722006-08-02 15:02:31 -07004466 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
Chris Leechdb217332006-06-17 21:24:58 -07004467 return;
4468 }
4469
4470 i = 0;
4471 cpu = first_cpu(cpu_online_map);
4472
Dan Williamsd379b012007-07-09 11:56:42 -07004473 for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
4474 chan = net_dma->channels[chan_idx];
4475
4476 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4477 + (i < (num_online_cpus() %
4478 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
Chris Leechdb217332006-06-17 21:24:58 -07004479
4480		while (n) {
Alexey Dobriyan29bbd722006-08-02 15:02:31 -07004481 per_cpu(softnet_data, cpu).net_dma = chan;
Chris Leechdb217332006-06-17 21:24:58 -07004482 cpu = next_cpu(cpu, cpu_online_map);
4483 n--;
4484 }
4485 i++;
4486 }
Chris Leechdb217332006-06-17 21:24:58 -07004487}
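/*
 * Worked example (illustrative): with 6 online CPUs and 4 channels set in
 * channel_mask, the loop above computes n = 6/4 + 1 = 2 for the first
 * 6 % 4 = 2 channels and n = 6/4 = 1 for the remaining two, so the six
 * CPUs are split 2+2+1+1 and every online CPU ends up with exactly one
 * channel in its softnet_data.net_dma pointer.
 */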
4488
4489/**
4490 * netdev_dma_event - event callback for the net_dma_client
4491 * @client: should always be net_dma_client
Randy Dunlapf4b8ea72006-06-22 16:00:11 -07004492 * @chan: DMA channel for the event
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07004493 * @state: DMA state to be handled
Chris Leechdb217332006-06-17 21:24:58 -07004494 */
Dan Williamsd379b012007-07-09 11:56:42 -07004495static enum dma_state_client
4496netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4497 enum dma_state state)
Chris Leechdb217332006-06-17 21:24:58 -07004498{
Dan Williamsd379b012007-07-09 11:56:42 -07004499 int i, found = 0, pos = -1;
4500 struct net_dma *net_dma =
4501 container_of(client, struct net_dma, client);
4502 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4503
4504 spin_lock(&net_dma->lock);
4505 switch (state) {
4506 case DMA_RESOURCE_AVAILABLE:
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004507 for (i = 0; i < nr_cpu_ids; i++)
Dan Williamsd379b012007-07-09 11:56:42 -07004508 if (net_dma->channels[i] == chan) {
4509 found = 1;
4510 break;
4511 } else if (net_dma->channels[i] == NULL && pos < 0)
4512 pos = i;
4513
4514 if (!found && pos >= 0) {
4515 ack = DMA_ACK;
4516 net_dma->channels[pos] = chan;
4517 cpu_set(pos, net_dma->channel_mask);
4518 net_dma_rebalance(net_dma);
4519 }
Chris Leechdb217332006-06-17 21:24:58 -07004520 break;
4521 case DMA_RESOURCE_REMOVED:
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004522 for (i = 0; i < nr_cpu_ids; i++)
Dan Williamsd379b012007-07-09 11:56:42 -07004523 if (net_dma->channels[i] == chan) {
4524 found = 1;
4525 pos = i;
4526 break;
4527 }
4528
4529 if (found) {
4530 ack = DMA_ACK;
4531 cpu_clear(pos, net_dma->channel_mask);
4532 net_dma->channels[i] = NULL;
4533 net_dma_rebalance(net_dma);
4534 }
Chris Leechdb217332006-06-17 21:24:58 -07004535 break;
4536 default:
4537 break;
4538 }
Dan Williamsd379b012007-07-09 11:56:42 -07004539 spin_unlock(&net_dma->lock);
4540
4541 return ack;
Chris Leechdb217332006-06-17 21:24:58 -07004542}
4543
4544/**
4545 * netdev_dma_register - register the networking subsystem as a DMA client
4546 */
4547static int __init netdev_dma_register(void)
4548{
Mike Travis0c0b0ac2008-05-02 16:43:08 -07004549	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *),
4550 GFP_KERNEL);
4551 if (unlikely(!net_dma.channels)) {
4552 printk(KERN_NOTICE
4553 "netdev_dma: no memory for net_dma.channels\n");
4554 return -ENOMEM;
4555 }
Dan Williamsd379b012007-07-09 11:56:42 -07004556 spin_lock_init(&net_dma.lock);
4557 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4558 dma_async_client_register(&net_dma.client);
4559 dma_async_client_chan_request(&net_dma.client);
Chris Leechdb217332006-06-17 21:24:58 -07004560 return 0;
4561}
4562
4563#else
4564static int __init netdev_dma_register(void) { return -ENODEV; }
4565#endif /* CONFIG_NET_DMA */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004566
Herbert Xu7f353bf2007-08-10 15:47:58 -07004567/**
4568 * netdev_compute_features - compute conjunction of two feature sets
4569 * @all: first feature set
4570 * @one: second feature set
4571 *
4572 * Computes a new feature set after adding a device with feature set
4573 * @one to the master device with current feature set @all. Returns
4574 * the new feature set.
4575 */
4576int netdev_compute_features(unsigned long all, unsigned long one)
4577{
4578 /* if device needs checksumming, downgrade to hw checksumming */
4579 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4580 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4581
4582 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4583 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4584 all ^= NETIF_F_HW_CSUM
4585 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4586
4587 if (one & NETIF_F_GSO)
4588 one |= NETIF_F_GSO_SOFTWARE;
4589 one |= NETIF_F_GSO;
4590
4591 /* If even one device supports robust GSO, enable it for all. */
4592 if (one & NETIF_F_GSO_ROBUST)
4593 all |= NETIF_F_GSO_ROBUST;
4594
4595 all &= one | NETIF_F_LLTX;
4596
4597 if (!(all & NETIF_F_ALL_CSUM))
4598 all &= ~NETIF_F_SG;
4599 if (!(all & NETIF_F_SG))
4600 all &= ~NETIF_F_GSO_MASK;
4601
4602 return all;
4603}
4604EXPORT_SYMBOL(netdev_compute_features);
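/*
 * Worked example (illustrative): enslaving a device that advertises only
 * (NETIF_F_SG | NETIF_F_IP_CSUM) under a master whose current set is
 * (NETIF_F_SG | NETIF_F_HW_CSUM).  The HW_CSUM bit is first downgraded to
 * NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM, the intersection with the slave's
 * features then drops IPV6_CSUM, and since checksumming and SG both
 * survive, nothing further is masked off: the result is
 * NETIF_F_SG | NETIF_F_IP_CSUM.
 */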
4605
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004606static struct hlist_head *netdev_create_hash(void)
4607{
4608 int i;
4609 struct hlist_head *hash;
4610
4611 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4612 if (hash != NULL)
4613 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4614 INIT_HLIST_HEAD(&hash[i]);
4615
4616 return hash;
4617}
4618
Eric W. Biederman881d9662007-09-17 11:56:21 -07004619/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07004620static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004621{
Eric W. Biederman881d9662007-09-17 11:56:21 -07004622 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07004623
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004624 net->dev_name_head = netdev_create_hash();
4625 if (net->dev_name_head == NULL)
4626 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004627
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004628 net->dev_index_head = netdev_create_hash();
4629 if (net->dev_index_head == NULL)
4630 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004631
4632 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07004633
4634err_idx:
4635 kfree(net->dev_name_head);
4636err_name:
4637 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004638}
4639
Pavel Emelyanov46650792007-10-08 20:38:39 -07004640static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004641{
4642 kfree(net->dev_name_head);
4643 kfree(net->dev_index_head);
4644}
4645
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004646static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004647 .init = netdev_init,
4648 .exit = netdev_exit,
4649};
4650
Pavel Emelyanov46650792007-10-08 20:38:39 -07004651static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02004652{
4653 struct net_device *dev, *next;
4654 /*
4655	 * Push all migratable network devices back to the
4656 * initial network namespace
4657 */
4658 rtnl_lock();
4659 for_each_netdev_safe(net, dev, next) {
4660 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004661 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02004662
4663		/* Ignore unmovable devices (e.g. loopback) */
4664 if (dev->features & NETIF_F_NETNS_LOCAL)
4665 continue;
4666
4667		/* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004668 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
4669 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004670 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004671 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02004672 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07004673 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02004674 }
4675 }
4676 rtnl_unlock();
4677}
4678
Denis V. Lunev022cbae2007-11-13 03:23:50 -08004679static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02004680 .exit = default_device_exit,
4681};
4682
Linus Torvalds1da177e2005-04-16 15:20:36 -07004683/*
4684 * Initialize the DEV module. At boot time this walks the device list and
4685 * unhooks any devices that fail to initialise (normally hardware not
4686 * present) and leaves us with a valid list of present and active devices.
4687 *
4688 */
4689
4690/*
4691 * This is called single threaded during boot, so no need
4692 * to take the rtnl semaphore.
4693 */
4694static int __init net_dev_init(void)
4695{
4696 int i, rc = -ENOMEM;
4697
4698 BUG_ON(!dev_boot_phase);
4699
Linus Torvalds1da177e2005-04-16 15:20:36 -07004700 if (dev_proc_init())
4701 goto out;
4702
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004703 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07004704 goto out;
4705
4706 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004707 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708 INIT_LIST_HEAD(&ptype_base[i]);
4709
Eric W. Biederman881d9662007-09-17 11:56:21 -07004710 if (register_pernet_subsys(&netdev_net_ops))
4711 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712
Eric W. Biedermance286d32007-09-12 13:53:49 +02004713 if (register_pernet_device(&default_device_ops))
4714 goto out;
4715
Linus Torvalds1da177e2005-04-16 15:20:36 -07004716 /*
4717 * Initialise the packet receive queues.
4718 */
4719
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07004720 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004721 struct softnet_data *queue;
4722
4723 queue = &per_cpu(softnet_data, i);
4724 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004725 queue->completion_queue = NULL;
4726 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004727
4728 queue->backlog.poll = process_backlog;
4729 queue->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004730 }
4731
Chris Leechdb217332006-06-17 21:24:58 -07004732 netdev_dma_register();
4733
Linus Torvalds1da177e2005-04-16 15:20:36 -07004734 dev_boot_phase = 0;
4735
4736 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
4737 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
4738
4739 hotcpu_notifier(dev_cpu_callback, 0);
4740 dst_init();
4741 dev_mcast_init();
4742 rc = 0;
4743out:
4744 return rc;
4745}
4746
4747subsys_initcall(net_dev_init);
4748
4749EXPORT_SYMBOL(__dev_get_by_index);
4750EXPORT_SYMBOL(__dev_get_by_name);
4751EXPORT_SYMBOL(__dev_remove_pack);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08004752EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004753EXPORT_SYMBOL(dev_add_pack);
4754EXPORT_SYMBOL(dev_alloc_name);
4755EXPORT_SYMBOL(dev_close);
4756EXPORT_SYMBOL(dev_get_by_flags);
4757EXPORT_SYMBOL(dev_get_by_index);
4758EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004759EXPORT_SYMBOL(dev_open);
4760EXPORT_SYMBOL(dev_queue_xmit);
4761EXPORT_SYMBOL(dev_remove_pack);
4762EXPORT_SYMBOL(dev_set_allmulti);
4763EXPORT_SYMBOL(dev_set_promiscuity);
4764EXPORT_SYMBOL(dev_change_flags);
4765EXPORT_SYMBOL(dev_set_mtu);
4766EXPORT_SYMBOL(dev_set_mac_address);
4767EXPORT_SYMBOL(free_netdev);
4768EXPORT_SYMBOL(netdev_boot_setup_check);
4769EXPORT_SYMBOL(netdev_set_master);
4770EXPORT_SYMBOL(netdev_state_change);
4771EXPORT_SYMBOL(netif_receive_skb);
4772EXPORT_SYMBOL(netif_rx);
4773EXPORT_SYMBOL(register_gifconf);
4774EXPORT_SYMBOL(register_netdevice);
4775EXPORT_SYMBOL(register_netdevice_notifier);
4776EXPORT_SYMBOL(skb_checksum_help);
4777EXPORT_SYMBOL(synchronize_net);
4778EXPORT_SYMBOL(unregister_netdevice);
4779EXPORT_SYMBOL(unregister_netdevice_notifier);
4780EXPORT_SYMBOL(net_enable_timestamp);
4781EXPORT_SYMBOL(net_disable_timestamp);
4782EXPORT_SYMBOL(dev_get_flags);
4783
4784#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4785EXPORT_SYMBOL(br_handle_frame_hook);
4786EXPORT_SYMBOL(br_fdb_get_hook);
4787EXPORT_SYMBOL(br_fdb_put_hook);
4788#endif
4789
4790#ifdef CONFIG_KMOD
4791EXPORT_SYMBOL(dev_load);
4792#endif
4793
4794EXPORT_PER_CPU_SYMBOL(softnet_data);