/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 * 		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include "net-sysfs.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client	client;
	spinlock_t		lock;
	cpumask_t		channel_mask;
	struct dma_chan		**channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);
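
/*
 * Illustrative sketch (not part of the original file): a pure reader of
 * the device list takes dev_base_lock for reading, as described above.
 * The helper name count_running_devices() is hypothetical.
 */
#if 0	/* example only, compiled out */
static int count_running_devices(struct net *net)
{
	struct net_device *dev;
	int count = 0;

	read_lock(&dev_base_lock);	/* pure reader */
	for_each_netdev(net, dev)
		if (dev->flags & IFF_UP)
			count++;
	read_unlock(&dev_base_lock);
	return count;
}
#endif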

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits dev->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first in the list, it could not sense that the packet is
 *	cloned and should be copied-on-write; it would change the clone
 *	and subsequent readers would get a broken packet.
 *	--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
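
/*
 * Illustrative sketch (not part of the original file): a minimal tap
 * registered with dev_add_pack() for every protocol and removed with
 * dev_remove_pack().  my_tap_rcv() and my_tap are hypothetical names.
 */
#if 0	/* example only, compiled out */
static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* inspect or count the packet here */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type my_tap = {
	.type = __constant_htons(ETH_P_ALL),	/* lands on the ptype_all chain */
	.func = my_tap_rcv,
};

/* at module init:	dev_add_pack(&my_tap);    */
/* at module exit:	dev_remove_pack(&my_tap); */
#endif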

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
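
/*
 * Illustrative example (not part of the original file): with the
 * parsing above, a kernel command line containing
 *
 *	netdev=9,0x300,eth1
 *
 * records irq 9 and base_addr 0x300 for the device named "eth1", to be
 * picked up later by netdev_boot_setup_check() during device probing.
 */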

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
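
/*
 * Illustrative sketch (not part of the original file): typical use of
 * dev_get_by_name() paired with dev_put().  The namespace and device
 * name are placeholders.
 */
#if 0	/* example only, compiled out */
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");

	if (dev) {
		/* ... the held reference keeps the device alive ... */
		dev_put(dev);
	}
#endif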

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device
 *	if it is found, or %NULL if it is not. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device
 *	if it is found, or NULL if it is not. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device if it is found, or NULL if it is not. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer to
 *	the first matching device, or NULL if none is found. The device
 *	returned has had a reference added and the pointer is safe until the
 *	user calls dev_put to indicate they have finished with it.
 */

struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
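
/*
 * Illustrative sketch (not part of the original file): requesting the
 * first free "eth%d" slot; on success dev->name holds e.g. "eth2" and
 * the unit number is returned.
 */
#if 0	/* example only, compiled out */
	int unit = dev_alloc_name(dev, "eth%d");

	if (unit < 0)
		return unit;	/* -EINVAL or -ENFILE */
#endif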


/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can even be on a different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
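
/*
 * Illustrative sketch (not part of the original file): dev_open() and
 * dev_close() both assert the rtnl semaphore, so callers outside the
 * core take it first.
 */
#if 0	/* example only, compiled out */
	rtnl_lock();
	err = dev_open(dev);
	/* ... device is up until ... */
	dev_close(dev);
	rtnl_unlock();
#endif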


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
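
/*
 * Illustrative sketch (not part of the original file): a minimal
 * notifier watching for devices coming up.  my_netdev_event() and
 * my_nb are hypothetical names.
 */
#if 0	/* example only, compiled out */
static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
	.notifier_call = my_netdev_event,
};

/* register_netdevice_notifier(&my_nb);   -- existing UP devices are replayed */
/* unregister_netdevice_notifier(&my_nb); */
#endif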

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		struct netdev_queue *txq = &dev->tx_queue;
		unsigned long flags;
		struct softnet_data *sd;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		txq->next_sched = sd->output_queue;
		sd->output_queue = txq;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}
Denis Vlasenko56079432006-03-29 15:57:29 -08001420
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421/*
1422 * Invalidate hardware checksum when packet is to be mangled, and
1423 * complete checksum manually on outgoing path.
1424 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001425int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426{
Al Virod3bc23e2006-11-14 21:24:49 -08001427 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001428 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
Patrick McHardy84fa7932006-08-29 16:44:56 -07001430 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001431 goto out_set_summed;
1432
1433 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001434 /* Let GSO fix up the checksum. */
1435 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 }
1437
Herbert Xua0308472007-10-15 01:47:15 -07001438 offset = skb->csum_start - skb_headroom(skb);
1439 BUG_ON(offset >= skb_headlen(skb));
1440 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1441
1442 offset += skb->csum_offset;
1443 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1444
1445 if (skb_cloned(skb) &&
1446 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1448 if (ret)
1449 goto out;
1450 }
1451
Herbert Xua0308472007-10-15 01:47:15 -07001452 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001453out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001455out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 return ret;
1457}
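/*
 * Example usage (a minimal sketch): code that must hand a CHECKSUM_PARTIAL
 * packet to hardware lacking checksum offload can resolve the checksum in
 * software first, exactly as dev_queue_xmit() does below.
 */
static int ex_resolve_csum(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;
	/* Computes and stores the checksum, then sets CHECKSUM_NONE; may
	 * fail if a cloned header could not be expanded. */
	return skb_checksum_help(skb);
}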
1458
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001459/**
1460 * skb_gso_segment - Perform segmentation on skb.
1461 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001462 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001463 *
1464 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001465 *
1466 * It may return NULL if the skb requires no segmentation. This is
1467 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001468 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001469struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001470{
1471 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1472 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001473 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001474 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001475
1476 BUG_ON(skb_shinfo(skb)->frag_list);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001477
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001478 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001479 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001480 __skb_pull(skb, skb->mac_len);
1481
Herbert Xuf9d106a2007-04-23 22:36:13 -07001482 if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001483 if (skb_header_cloned(skb) &&
1484 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1485 return ERR_PTR(err);
1486 }
1487
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001488 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001489 list_for_each_entry_rcu(ptype,
1490 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001491 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001492 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001493 err = ptype->gso_send_check(skb);
1494 segs = ERR_PTR(err);
1495 if (err || skb_gso_ok(skb, features))
1496 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001497 __skb_push(skb, (skb->data -
1498 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001499 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001500 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001501 break;
1502 }
1503 }
1504 rcu_read_unlock();
1505
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001506 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001507
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001508 return segs;
1509}
1510
1511EXPORT_SYMBOL(skb_gso_segment);
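/*
 * Example usage (a minimal sketch mirroring dev_gso_segment() below, with
 * error handling abbreviated): segment an oversized skb and transmit the
 * resulting list one frame at a time.
 */
static int ex_segment_and_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, dev->features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)			/* header verification only */
		return dev->hard_start_xmit(skb, dev);

	kfree_skb(skb);			/* the original is replaced by the list */
	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		dev->hard_start_xmit(nskb, dev);
	}
	return 0;
}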
1512
Herbert Xufb286bb2005-11-10 13:01:24 -08001513/* Take action when hardware reception checksum errors are detected. */
1514#ifdef CONFIG_BUG
1515void netdev_rx_csum_fault(struct net_device *dev)
1516{
1517 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001518 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001519 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001520 dump_stack();
1521 }
1522}
1523EXPORT_SYMBOL(netdev_rx_csum_fault);
1524#endif
1525
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526/* Actually, we should eliminate this check as soon as we know that:
 1527 * 1. An IOMMU is present and allows mapping all the memory.
1528 * 2. No high memory really exists on this machine.
1529 */
1530
1531static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1532{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001533#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 int i;
1535
1536 if (dev->features & NETIF_F_HIGHDMA)
1537 return 0;
1538
1539 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1540 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1541 return 1;
1542
Herbert Xu3d3a8532006-06-27 13:33:10 -07001543#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 return 0;
1545}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001547struct dev_gso_cb {
1548 void (*destructor)(struct sk_buff *skb);
1549};
1550
1551#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1552
1553static void dev_gso_skb_destructor(struct sk_buff *skb)
1554{
1555 struct dev_gso_cb *cb;
1556
1557 do {
1558 struct sk_buff *nskb = skb->next;
1559
1560 skb->next = nskb->next;
1561 nskb->next = NULL;
1562 kfree_skb(nskb);
1563 } while (skb->next);
1564
1565 cb = DEV_GSO_CB(skb);
1566 if (cb->destructor)
1567 cb->destructor(skb);
1568}
1569
1570/**
1571 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1572 * @skb: buffer to segment
1573 *
1574 * This function segments the given skb and stores the list of segments
1575 * in skb->next.
1576 */
1577static int dev_gso_segment(struct sk_buff *skb)
1578{
1579 struct net_device *dev = skb->dev;
1580 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001581 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1582 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001583
Herbert Xu576a30e2006-06-27 13:22:38 -07001584 segs = skb_gso_segment(skb, features);
1585
1586 /* Verifying header integrity only. */
1587 if (!segs)
1588 return 0;
1589
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001590 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001591 return PTR_ERR(segs);
1592
1593 skb->next = segs;
1594 DEV_GSO_CB(skb)->destructor = skb->destructor;
1595 skb->destructor = dev_gso_skb_destructor;
1596
1597 return 0;
1598}
1599
1600int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1601{
1602 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001603 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001604 dev_queue_xmit_nit(skb, dev);
1605
Herbert Xu576a30e2006-06-27 13:22:38 -07001606 if (netif_needs_gso(dev, skb)) {
1607 if (unlikely(dev_gso_segment(skb)))
1608 goto out_kfree_skb;
1609 if (skb->next)
1610 goto gso;
1611 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001612
Herbert Xu576a30e2006-06-27 13:22:38 -07001613 return dev->hard_start_xmit(skb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001614 }
1615
Herbert Xu576a30e2006-06-27 13:22:38 -07001616gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001617 do {
1618 struct sk_buff *nskb = skb->next;
1619 int rc;
1620
1621 skb->next = nskb->next;
1622 nskb->next = NULL;
1623 rc = dev->hard_start_xmit(nskb, dev);
1624 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001625 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001626 skb->next = nskb;
1627 return rc;
1628 }
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001629 if (unlikely((netif_queue_stopped(dev) ||
Pavel Emelyanov668f8952007-10-21 17:01:56 -07001630 netif_subqueue_stopped(dev, skb)) &&
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001631 skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001632 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001633 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001634
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001635 skb->destructor = DEV_GSO_CB(skb)->destructor;
1636
1637out_kfree_skb:
1638 kfree_skb(skb);
1639 return 0;
1640}
1641
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642/**
1643 * dev_queue_xmit - transmit a buffer
1644 * @skb: buffer to transmit
1645 *
1646 * Queue a buffer for transmission to a network device. The caller must
1647 * have set the device and priority and built the buffer before calling
1648 * this function. The function can be called from an interrupt.
1649 *
1650 * A negative errno code is returned on a failure. A success does not
1651 * guarantee the frame will be transmitted as it may be dropped due
1652 * to congestion or traffic shaping.
Ben Greearaf191362005-04-24 20:12:36 -07001653 *
1654 * -----------------------------------------------------------------------------------
1655 * I notice this method can also return errors from the queue disciplines,
1656 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1657 * be positive.
1658 *
1659 * Regardless of the return value, the skb is consumed, so it is currently
1660 * difficult to retry a send to this method. (You can bump the ref count
1661 * before sending to hold a reference for retry if you are careful.)
1662 *
1663 * When calling this method, interrupts MUST be enabled. This is because
1664 * the BH enable code must have IRQs enabled so that it will not deadlock.
1665 * --BLG
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 */
1667
1668int dev_queue_xmit(struct sk_buff *skb)
1669{
1670 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001671 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 struct Qdisc *q;
1673 int rc = -ENOMEM;
1674
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001675 /* GSO will handle the following emulations directly. */
1676 if (netif_needs_gso(dev, skb))
1677 goto gso;
1678
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 if (skb_shinfo(skb)->frag_list &&
1680 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001681 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 goto out_kfree_skb;
1683
1684 /* Fragmented skb is linearized if device does not support SG,
 1685 * or if at least one of the fragments is in highmem and the device
1686 * does not support DMA from it.
1687 */
1688 if (skb_shinfo(skb)->nr_frags &&
1689 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001690 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 goto out_kfree_skb;
1692
1693 /* If packet is not checksummed and device does not support
1694 * checksumming for this protocol, complete checksumming here.
1695 */
Herbert Xu663ead32007-04-09 11:59:07 -07001696 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1697 skb_set_transport_header(skb, skb->csum_start -
1698 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001699 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1700 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001701 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001703gso:
David S. Millerdc2b4842008-07-08 17:18:23 -07001704 txq = &dev->tx_queue;
1705 spin_lock_prefetch(&txq->lock);
Eric Dumazet2d7ceec2005-09-27 15:22:58 -07001706
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001707 /* Disable soft irqs for various locks below. Also
1708 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001710 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711
David S. Millerdc2b4842008-07-08 17:18:23 -07001712 /* Updates of qdisc are serialized by queue->lock.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001713 * The struct Qdisc which is pointed to by qdisc is now an
 1714 * RCU structure - it may be accessed without acquiring
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 * a lock (but the structure may be stale.) The freeing of the
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001716 * qdisc will be deferred until it's known that there are no
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 * more references to it.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001718 *
1719 * If the qdisc has an enqueue function, we still need to
David S. Millerdc2b4842008-07-08 17:18:23 -07001720 * hold the queue->lock before calling it, since queue->lock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 * also serializes access to the device queue.
1722 */
1723
David S. Millerb0e1e642008-07-08 17:42:10 -07001724 q = rcu_dereference(txq->qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725#ifdef CONFIG_NET_CLS_ACT
1726 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1727#endif
1728 if (q->enqueue) {
1729 /* Grab device queue */
David S. Millerdc2b4842008-07-08 17:18:23 -07001730 spin_lock(&txq->lock);
David S. Millerb0e1e642008-07-08 17:42:10 -07001731 q = txq->qdisc;
Patrick McHardy85670cc2006-09-27 16:45:45 -07001732 if (q->enqueue) {
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001733 /* reset queue_mapping to zero */
Pavel Emelyanovdfa40912007-10-21 16:57:55 -07001734 skb_set_queue_mapping(skb, 0);
Patrick McHardy85670cc2006-09-27 16:45:45 -07001735 rc = q->enqueue(skb, q);
1736 qdisc_run(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07001737 spin_unlock(&txq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
Patrick McHardy85670cc2006-09-27 16:45:45 -07001739 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1740 goto out;
1741 }
David S. Millerdc2b4842008-07-08 17:18:23 -07001742 spin_unlock(&txq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 }
1744
 1745 	/* The device has no queue. Common case for software devices:
 1746 	   loopback, all sorts of tunnels...
 1747 
Herbert Xu932ff272006-06-09 12:20:56 -07001748 	   Really, it is unlikely that netif_tx_lock protection is necessary
 1749 	   here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 	   counters.)
 1751 	   However, it is possible that they rely on the protection
 1752 	   made by us here.
 1753 
 1754 	   Check this and take the lock. It is not prone to deadlocks.
 1755 	   Or shoot the noqueue qdisc; it is even simpler 8)
 1756 	 */
1757 if (dev->flags & IFF_UP) {
1758 int cpu = smp_processor_id(); /* ok because BHs are off */
1759
1760 if (dev->xmit_lock_owner != cpu) {
1761
1762 HARD_TX_LOCK(dev, cpu);
1763
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07001764 if (!netif_queue_stopped(dev) &&
Pavel Emelyanov668f8952007-10-21 17:01:56 -07001765 !netif_subqueue_stopped(dev, skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 rc = 0;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001767 if (!dev_hard_start_xmit(skb, dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768 HARD_TX_UNLOCK(dev);
1769 goto out;
1770 }
1771 }
1772 HARD_TX_UNLOCK(dev);
1773 if (net_ratelimit())
1774 printk(KERN_CRIT "Virtual device %s asks to "
1775 "queue packet!\n", dev->name);
1776 } else {
1777 /* Recursion is detected! It is possible,
1778 * unfortunately */
1779 if (net_ratelimit())
1780 printk(KERN_CRIT "Dead loop on virtual device "
1781 "%s, fix it urgently!\n", dev->name);
1782 }
1783 }
1784
1785 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001786 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787
1788out_kfree_skb:
1789 kfree_skb(skb);
1790 return rc;
1791out:
Herbert Xud4828d82006-06-22 02:28:18 -07001792 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 return rc;
1794}
1795
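/*
 * Example usage (a minimal sketch, assuming an Ethernet device carrying an
 * IPv4 payload and omitting most error handling): build a frame and queue
 * it for transmission.  Note dev_queue_xmit() consumes the skb either way.
 */
static int ex_send_packet(struct net_device *dev, const void *data, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), data, len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);
	if (dev_hard_header(skb, dev, ETH_P_IP,
			    dev->broadcast, dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}
	return dev_queue_xmit(skb);	/* may also return positive qdisc codes */
}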
1796
1797/*=======================================================================
1798 Receiver routines
1799 =======================================================================*/
1800
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001801int netdev_max_backlog __read_mostly = 1000;
1802int netdev_budget __read_mostly = 300;
1803int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804
1805DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1806
1807
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808/**
1809 * netif_rx - post buffer to the network code
1810 * @skb: buffer to post
1811 *
1812 * This function receives a packet from a device driver and queues it for
1813 * the upper (protocol) levels to process. It always succeeds. The buffer
1814 * may be dropped during processing for congestion control or by the
1815 * protocol layers.
1816 *
1817 * return values:
1818 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 * NET_RX_DROP (packet was dropped)
1820 *
1821 */
1822
1823int netif_rx(struct sk_buff *skb)
1824{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 struct softnet_data *queue;
1826 unsigned long flags;
1827
1828 /* if netpoll wants it, pretend we never saw it */
1829 if (netpoll_rx(skb))
1830 return NET_RX_DROP;
1831
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001832 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001833 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834
1835 /*
 1836	 * The code is rearranged so that the path is shortest
 1837	 * when the CPU is congested but still operating.
1838 */
1839 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 queue = &__get_cpu_var(softnet_data);
1841
1842 __get_cpu_var(netdev_rx_stat).total++;
1843 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1844 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845enqueue:
1846 dev_hold(skb->dev);
1847 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001849 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001850 }
1851
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001852 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 goto enqueue;
1854 }
1855
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 __get_cpu_var(netdev_rx_stat).dropped++;
1857 local_irq_restore(flags);
1858
1859 kfree_skb(skb);
1860 return NET_RX_DROP;
1861}
1862
1863int netif_rx_ni(struct sk_buff *skb)
1864{
1865 int err;
1866
1867 preempt_disable();
1868 err = netif_rx(skb);
1869 if (local_softirq_pending())
1870 do_softirq();
1871 preempt_enable();
1872
1873 return err;
1874}
1875
1876EXPORT_SYMBOL(netif_rx_ni);
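/*
 * Example usage (a minimal sketch of the classic non-NAPI receive path): an
 * interrupt handler copies one frame out of the hardware and posts it with
 * netif_rx().  ex_hw_copy_frame() is hypothetical.
 */
static void ex_rx_one_frame(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(len + NET_IP_ALIGN);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	ex_hw_copy_frame(dev, skb_put(skb, len));
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* queues to this CPU's backlog */
}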
1877
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001878static inline struct net_device *skb_bond(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879{
1880 struct net_device *dev = skb->dev;
1881
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001882 if (dev->master) {
David S. Miller7ea49ed2006-08-14 17:08:36 -07001883 if (skb_bond_should_drop(skb)) {
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001884 kfree_skb(skb);
1885 return NULL;
1886 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 skb->dev = dev->master;
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001888 }
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001889
1890 return dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891}
1892
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001893
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894static void net_tx_action(struct softirq_action *h)
1895{
1896 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1897
1898 if (sd->completion_queue) {
1899 struct sk_buff *clist;
1900
1901 local_irq_disable();
1902 clist = sd->completion_queue;
1903 sd->completion_queue = NULL;
1904 local_irq_enable();
1905
1906 while (clist) {
1907 struct sk_buff *skb = clist;
1908 clist = clist->next;
1909
1910 BUG_TRAP(!atomic_read(&skb->users));
1911 __kfree_skb(skb);
1912 }
1913 }
1914
1915 if (sd->output_queue) {
David S. Milleree609cb2008-07-08 22:58:37 -07001916 struct netdev_queue *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917
1918 local_irq_disable();
1919 head = sd->output_queue;
1920 sd->output_queue = NULL;
1921 local_irq_enable();
1922
1923 while (head) {
David S. Milleree609cb2008-07-08 22:58:37 -07001924 struct netdev_queue *txq = head;
1925 struct net_device *dev = txq->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 head = head->next_sched;
1927
1928 smp_mb__before_clear_bit();
1929 clear_bit(__LINK_STATE_SCHED, &dev->state);
1930
David S. Millerdc2b4842008-07-08 17:18:23 -07001931 if (spin_trylock(&txq->lock)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 qdisc_run(dev);
David S. Millerdc2b4842008-07-08 17:18:23 -07001933 spin_unlock(&txq->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 } else {
1935 netif_schedule(dev);
1936 }
1937 }
1938 }
1939}
1940
Stephen Hemminger6f05f622007-03-08 20:46:03 -08001941static inline int deliver_skb(struct sk_buff *skb,
1942 struct packet_type *pt_prev,
1943 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944{
1945 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001946 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947}
1948
1949#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Stephen Hemminger6229e362007-03-21 13:38:47 -07001950/* These hooks are defined here for ATM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951struct net_bridge;
1952struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
1953 unsigned char *addr);
Stephen Hemminger6229e362007-03-21 13:38:47 -07001954void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955
Stephen Hemminger6229e362007-03-21 13:38:47 -07001956/*
 1957 * If the bridge module is loaded, call the bridging hook.
 1958 * Returns NULL if the packet was consumed.
1959 */
1960struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
1961 struct sk_buff *skb) __read_mostly;
1962static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
1963 struct packet_type **pt_prev, int *ret,
1964 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965{
1966 struct net_bridge_port *port;
1967
Stephen Hemminger6229e362007-03-21 13:38:47 -07001968 if (skb->pkt_type == PACKET_LOOPBACK ||
1969 (port = rcu_dereference(skb->dev->br_port)) == NULL)
1970 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
1972 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07001973 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001975 }
1976
Stephen Hemminger6229e362007-03-21 13:38:47 -07001977 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978}
1979#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07001980#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981#endif
1982
Patrick McHardyb863ceb2007-07-14 18:55:06 -07001983#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
1984struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
1985EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
1986
1987static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
1988 struct packet_type **pt_prev,
1989 int *ret,
1990 struct net_device *orig_dev)
1991{
1992 if (skb->dev->macvlan_port == NULL)
1993 return skb;
1994
1995 if (*pt_prev) {
1996 *ret = deliver_skb(skb, *pt_prev, orig_dev);
1997 *pt_prev = NULL;
1998 }
1999 return macvlan_handle_frame_hook(skb);
2000}
2001#else
2002#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2003#endif
2004
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005#ifdef CONFIG_NET_CLS_ACT
 2006/* TODO: Maybe we should just force sch_ingress to be compiled in
 2007 * when CONFIG_NET_CLS_ACT is? Otherwise we execute some useless
 2008 * instructions (a compare and 2 extra stores) right now if we don't
 2009 * have it on but do have CONFIG_NET_CLS_ACT.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002010 * NOTE: This doesn't stop any functionality; if you don't have
 2011 * the ingress scheduler, you just can't add policies on ingress.
2012 *
2013 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002014static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002017 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002018 struct netdev_queue *rxq;
2019 int result = TC_ACT_OK;
2020 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002021
Herbert Xuf697c3e2007-10-14 00:38:47 -07002022 if (MAX_RED_LOOP < ttl++) {
2023 printk(KERN_WARNING
2024 "Redir loop detected Dropping packet (%d->%d)\n",
2025 skb->iif, dev->ifindex);
2026 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 }
2028
Herbert Xuf697c3e2007-10-14 00:38:47 -07002029 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2030 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2031
David S. Miller555353c2008-07-08 17:33:13 -07002032 rxq = &dev->rx_queue;
2033
2034 spin_lock(&rxq->lock);
David S. Miller816f3252008-07-08 22:49:00 -07002035 if ((q = rxq->qdisc) != NULL)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002036 result = q->enqueue(skb, q);
David S. Miller555353c2008-07-08 17:33:13 -07002037 spin_unlock(&rxq->lock);
Herbert Xuf697c3e2007-10-14 00:38:47 -07002038
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 return result;
2040}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002041
2042static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2043 struct packet_type **pt_prev,
2044 int *ret, struct net_device *orig_dev)
2045{
David S. Miller816f3252008-07-08 22:49:00 -07002046 if (!skb->dev->rx_queue.qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002047 goto out;
2048
2049 if (*pt_prev) {
2050 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2051 *pt_prev = NULL;
2052 } else {
2053 /* Huh? Why does turning on AF_PACKET affect this? */
2054 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2055 }
2056
2057 switch (ing_filter(skb)) {
2058 case TC_ACT_SHOT:
2059 case TC_ACT_STOLEN:
2060 kfree_skb(skb);
2061 return NULL;
2062 }
2063
2064out:
2065 skb->tc_verd = 0;
2066 return skb;
2067}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068#endif
2069
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002070/**
2071 * netif_receive_skb - process receive buffer from network
2072 * @skb: buffer to process
2073 *
2074 * netif_receive_skb() is the main receive data processing function.
2075 * It always succeeds. The buffer may be dropped during processing
2076 * for congestion control or by the protocol layers.
2077 *
2078 * This function may only be called from softirq context and interrupts
2079 * should be enabled.
2080 *
2081 * Return values (usually ignored):
2082 * NET_RX_SUCCESS: no congestion
2083 * NET_RX_DROP: packet was dropped
2084 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085int netif_receive_skb(struct sk_buff *skb)
2086{
2087 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002088 struct net_device *orig_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002090 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091
2092 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002093 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 return NET_RX_DROP;
2095
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002096 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002097 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
Patrick McHardyc01003c2007-03-29 11:46:52 -07002099 if (!skb->iif)
2100 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002101
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002102 orig_dev = skb_bond(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002104 if (!orig_dev)
2105 return NET_RX_DROP;
2106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 __get_cpu_var(netdev_rx_stat).total++;
2108
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002109 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002110 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002111 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
2113 pt_prev = NULL;
2114
2115 rcu_read_lock();
2116
Eric W. Biedermanb9f75f42008-06-20 22:16:51 -07002117 /* Don't receive packets in an exiting network namespace */
2118 if (!net_alive(dev_net(skb->dev)))
2119 goto out;
2120
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121#ifdef CONFIG_NET_CLS_ACT
2122 if (skb->tc_verd & TC_NCLS) {
2123 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2124 goto ncls;
2125 }
2126#endif
2127
2128 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2129 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002130 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002131 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 pt_prev = ptype;
2133 }
2134 }
2135
2136#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002137 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2138 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140ncls:
2141#endif
2142
Stephen Hemminger6229e362007-03-21 13:38:47 -07002143 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2144 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002146 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2147 if (!skb)
2148 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149
2150 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002151 list_for_each_entry_rcu(ptype,
2152 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 if (ptype->type == type &&
2154 (!ptype->dev || ptype->dev == skb->dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002155 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002156 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 pt_prev = ptype;
2158 }
2159 }
2160
2161 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002162 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 } else {
2164 kfree_skb(skb);
 2165 		/* Jamal, now you will not be able to escape explaining
 2166 		 * to me how you were going to use this. :-)
2167 */
2168 ret = NET_RX_DROP;
2169 }
2170
2171out:
2172 rcu_read_unlock();
2173 return ret;
2174}
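/*
 * Example usage (a minimal sketch): a NAPI driver's poll routine feeds
 * frames to netif_receive_skb() directly from softirq context rather than
 * bouncing them through netif_rx().  The ex_* names are hypothetical.
 */
static int ex_poll(struct napi_struct *napi, int budget)
{
	struct ex_priv *priv = container_of(napi, struct ex_priv, napi);
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = ex_hw_next_rx(priv)) != NULL) {
		skb->protocol = eth_type_trans(skb, priv->dev);
		netif_receive_skb(skb);
		work++;
	}
	if (work < budget) {
		napi_complete(napi);		/* ring is drained */
		ex_hw_enable_rx_irq(priv);
	}
	return work;
}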
2175
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002176static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177{
2178 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2180 unsigned long start_time = jiffies;
2181
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002182 napi->weight = weight_p;
2183 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 struct sk_buff *skb;
2185 struct net_device *dev;
2186
2187 local_irq_disable();
2188 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002189 if (!skb) {
2190 __napi_complete(napi);
2191 local_irq_enable();
2192 break;
2193 }
2194
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 local_irq_enable();
2196
2197 dev = skb->dev;
2198
2199 netif_receive_skb(skb);
2200
2201 dev_put(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002202 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002204 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205}
2206
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002207/**
2208 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002209 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002210 *
2211 * The entry's receive function will be scheduled to run
2212 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002213void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002214{
2215 unsigned long flags;
2216
2217 local_irq_save(flags);
2218 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2219 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2220 local_irq_restore(flags);
2221}
2222EXPORT_SYMBOL(__napi_schedule);
2223
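/*
 * Example usage (a minimal sketch, the interrupt-side counterpart of a NAPI
 * poll routine): the RX interrupt masks further interrupts and defers the
 * work to NAPI.  The ex_* names are hypothetical.
 */
static irqreturn_t ex_rx_irq(int irq, void *dev_id)
{
	struct ex_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		ex_hw_disable_rx_irq(priv);	/* poll() re-enables when done */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}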
2224
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225static void net_rx_action(struct softirq_action *h)
2226{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002227 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 unsigned long start_time = jiffies;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002229 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002230 void *have;
2231
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 local_irq_disable();
2233
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002234 while (!list_empty(list)) {
2235 struct napi_struct *n;
2236 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002238 		/* If the softirq window is exhausted then punt.
2239 *
2240 * Note that this is a slight policy change from the
2241 * previous NAPI code, which would allow up to 2
2242 * jiffies to pass before breaking out. The test
2243 * used to be "jiffies - start_time > 1".
2244 */
2245 if (unlikely(budget <= 0 || jiffies != start_time))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 goto softnet_break;
2247
2248 local_irq_enable();
2249
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002250 /* Even though interrupts have been re-enabled, this
2251 * access is safe because interrupts can only add new
2252 * entries to the tail of this list, and only ->poll()
2253 * calls can remove this head entry from the list.
2254 */
2255 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002257 have = netpoll_poll_lock(n);
2258
2259 weight = n->weight;
2260
David S. Miller0a7606c2007-10-29 21:28:47 -07002261 /* This NAPI_STATE_SCHED test is for avoiding a race
2262 * with netpoll's poll_napi(). Only the entity which
2263 * obtains the lock and sees NAPI_STATE_SCHED set will
2264 * actually make the ->poll() call. Therefore we avoid
 2265 		 * accidentally calling ->poll() when NAPI is not scheduled.
2266 */
2267 work = 0;
2268 if (test_bit(NAPI_STATE_SCHED, &n->state))
2269 work = n->poll(n, weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002270
2271 WARN_ON_ONCE(work > weight);
2272
2273 budget -= work;
2274
2275 local_irq_disable();
2276
2277 /* Drivers must not modify the NAPI state if they
2278 * consume the entire weight. In such cases this code
2279 * still "owns" the NAPI instance and therefore can
2280 * move the instance around on the list at-will.
2281 */
David S. Millerfed17f32008-01-07 21:00:40 -08002282 if (unlikely(work == weight)) {
2283 if (unlikely(napi_disable_pending(n)))
2284 __napi_complete(n);
2285 else
2286 list_move_tail(&n->poll_list, list);
2287 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002288
2289 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 }
2291out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002292 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002293
Chris Leechdb217332006-06-17 21:24:58 -07002294#ifdef CONFIG_NET_DMA
2295 /*
2296 * There may not be any more sk_buffs coming right now, so push
2297 * any pending DMA copies to hardware
2298 */
Dan Williamsd379b012007-07-09 11:56:42 -07002299 if (!cpus_empty(net_dma.channel_mask)) {
2300 int chan_idx;
2301 for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
2302 struct dma_chan *chan = net_dma.channels[chan_idx];
2303 if (chan)
2304 dma_async_memcpy_issue_pending(chan);
2305 }
Chris Leechdb217332006-06-17 21:24:58 -07002306 }
2307#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002308
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309 return;
2310
2311softnet_break:
2312 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2313 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2314 goto out;
2315}
2316
2317static gifconf_func_t * gifconf_list [NPROTO];
2318
2319/**
2320 * register_gifconf - register a SIOCGIF handler
2321 * @family: Address family
2322 * @gifconf: Function handler
2323 *
 2324 * Register protocol-dependent address dumping routines. The handler
2325 * that is passed must not be freed or reused until it has been replaced
2326 * by another handler.
2327 */
2328int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2329{
2330 if (family >= NPROTO)
2331 return -EINVAL;
2332 gifconf_list[family] = gifconf;
2333 return 0;
2334}
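/*
 * Example usage (a minimal sketch): a protocol module registers its
 * SIOCGIFCONF dumper at init time.  As dev_ifconf() below shows, the
 * handler is called with a NULL buffer when only the required length is
 * wanted.  PF_EXAMPLE and the ifreq contents are hypothetical.
 */
static int ex_gifconf(struct net_device *dev, char __user *buf, int len)
{
	if (!buf)
		return sizeof(struct ifreq);	/* sizing pass */
	if (len < (int)sizeof(struct ifreq))
		return -EFAULT;
	/* ... copy one struct ifreq describing @dev to @buf ... */
	return sizeof(struct ifreq);
}

static int __init ex_proto_init(void)
{
	return register_gifconf(PF_EXAMPLE, ex_gifconf);
}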
2335
2336
2337/*
2338 * Map an interface index to its name (SIOCGIFNAME)
2339 */
2340
2341/*
2342 * We need this ioctl for efficient implementation of the
2343 * if_indextoname() function required by the IPv6 API. Without
2344 * it, we would have to search all the interfaces to find a
2345 * match. --pb
2346 */
2347
Eric W. Biederman881d9662007-09-17 11:56:21 -07002348static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349{
2350 struct net_device *dev;
2351 struct ifreq ifr;
2352
2353 /*
2354 * Fetch the caller's info block.
2355 */
2356
2357 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2358 return -EFAULT;
2359
2360 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002361 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 if (!dev) {
2363 read_unlock(&dev_base_lock);
2364 return -ENODEV;
2365 }
2366
2367 strcpy(ifr.ifr_name, dev->name);
2368 read_unlock(&dev_base_lock);
2369
2370 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2371 return -EFAULT;
2372 return 0;
2373}
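/*
 * Example usage (a minimal userspace sketch, guarded out since this is a
 * kernel source file): the ioctl served by dev_ifname() above is what
 * glibc's if_indextoname() wraps.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int print_ifname(int ifindex)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_ifindex = ifindex;
	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
		printf("index %d is %s\n", ifindex, ifr.ifr_name);
	close(fd);
	return 0;
}
#endif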
2374
2375/*
2376 * Perform a SIOCGIFCONF call. This structure will change
2377 * size eventually, and there is nothing I can do about it.
2378 * Thus we will need a 'compatibility mode'.
2379 */
2380
Eric W. Biederman881d9662007-09-17 11:56:21 -07002381static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382{
2383 struct ifconf ifc;
2384 struct net_device *dev;
2385 char __user *pos;
2386 int len;
2387 int total;
2388 int i;
2389
2390 /*
2391 * Fetch the caller's info block.
2392 */
2393
2394 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2395 return -EFAULT;
2396
2397 pos = ifc.ifc_buf;
2398 len = ifc.ifc_len;
2399
2400 /*
2401 * Loop over the interfaces, and write an info block for each.
2402 */
2403
2404 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002405 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406 for (i = 0; i < NPROTO; i++) {
2407 if (gifconf_list[i]) {
2408 int done;
2409 if (!pos)
2410 done = gifconf_list[i](dev, NULL, 0);
2411 else
2412 done = gifconf_list[i](dev, pos + total,
2413 len - total);
2414 if (done < 0)
2415 return -EFAULT;
2416 total += done;
2417 }
2418 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002419 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420
2421 /*
2422 * All done. Write the updated control block back to the caller.
2423 */
2424 ifc.ifc_len = total;
2425
2426 /*
2427 * Both BSD and Solaris return 0 here, so we do too.
2428 */
2429 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2430}
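/*
 * Example usage (a minimal userspace sketch, guarded out since this is a
 * kernel source file): enumerate interface names via SIOCGIFCONF.  Passing
 * a NULL ifc_buf would instead make dev_ifconf() above report only the
 * length needed.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static void list_interfaces(void)
{
	char buf[4096];
	struct ifconf ifc;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int i;

	if (fd < 0)
		return;
	ifc.ifc_len = sizeof(buf);
	ifc.ifc_buf = buf;
	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0)
		for (i = 0; i < ifc.ifc_len / (int)sizeof(struct ifreq); i++)
			printf("%s\n", ifc.ifc_req[i].ifr_name);
	close(fd);
}
#endif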
2431
2432#ifdef CONFIG_PROC_FS
2433/*
2434 * This is invoked by the /proc filesystem handler to display a device
2435 * in detail.
2436 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002438 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439{
Denis V. Luneve372c412007-11-19 22:31:54 -08002440 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002441 loff_t off;
2442 struct net_device *dev;
2443
Linus Torvalds1da177e2005-04-16 15:20:36 -07002444 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002445 if (!*pos)
2446 return SEQ_START_TOKEN;
2447
2448 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002449 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002450 if (off++ == *pos)
2451 return dev;
2452
2453 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454}
2455
2456void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2457{
Denis V. Luneve372c412007-11-19 22:31:54 -08002458 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002460 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002461 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462}
2463
2464void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002465 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466{
2467 read_unlock(&dev_base_lock);
2468}
2469
2470static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2471{
Rusty Russellc45d2862007-03-28 14:29:08 -07002472 struct net_device_stats *stats = dev->get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473
Rusty Russell5a1b5892007-04-28 21:04:03 -07002474 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2475 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2476 dev->name, stats->rx_bytes, stats->rx_packets,
2477 stats->rx_errors,
2478 stats->rx_dropped + stats->rx_missed_errors,
2479 stats->rx_fifo_errors,
2480 stats->rx_length_errors + stats->rx_over_errors +
2481 stats->rx_crc_errors + stats->rx_frame_errors,
2482 stats->rx_compressed, stats->multicast,
2483 stats->tx_bytes, stats->tx_packets,
2484 stats->tx_errors, stats->tx_dropped,
2485 stats->tx_fifo_errors, stats->collisions,
2486 stats->tx_carrier_errors +
2487 stats->tx_aborted_errors +
2488 stats->tx_window_errors +
2489 stats->tx_heartbeat_errors,
2490 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491}
2492
2493/*
 2494 * Called from the PROCfs module. This now uses the new arbitrarily sized
2495 * /proc/net interface to create /proc/net/dev
2496 */
2497static int dev_seq_show(struct seq_file *seq, void *v)
2498{
2499 if (v == SEQ_START_TOKEN)
2500 seq_puts(seq, "Inter-| Receive "
2501 " | Transmit\n"
2502 " face |bytes packets errs drop fifo frame "
2503 "compressed multicast|bytes packets errs "
2504 "drop fifo colls carrier compressed\n");
2505 else
2506 dev_seq_printf_stats(seq, v);
2507 return 0;
2508}
2509
2510static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2511{
2512 struct netif_rx_stats *rc = NULL;
2513
Mike Travis0c0b0ac2008-05-02 16:43:08 -07002514 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002515 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 rc = &per_cpu(netdev_rx_stat, *pos);
2517 break;
2518 } else
2519 ++*pos;
2520 return rc;
2521}
2522
2523static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2524{
2525 return softnet_get_online(pos);
2526}
2527
2528static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2529{
2530 ++*pos;
2531 return softnet_get_online(pos);
2532}
2533
2534static void softnet_seq_stop(struct seq_file *seq, void *v)
2535{
2536}
2537
2538static int softnet_seq_show(struct seq_file *seq, void *v)
2539{
2540 struct netif_rx_stats *s = v;
2541
2542 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07002543 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07002544 0, 0, 0, 0, /* was fastroute */
2545 s->cpu_collision );
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 return 0;
2547}
2548
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002549static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 .start = dev_seq_start,
2551 .next = dev_seq_next,
2552 .stop = dev_seq_stop,
2553 .show = dev_seq_show,
2554};
2555
2556static int dev_seq_open(struct inode *inode, struct file *file)
2557{
Denis V. Luneve372c412007-11-19 22:31:54 -08002558 return seq_open_net(inode, file, &dev_seq_ops,
2559 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560}
2561
Arjan van de Ven9a321442007-02-12 00:55:35 -08002562static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563 .owner = THIS_MODULE,
2564 .open = dev_seq_open,
2565 .read = seq_read,
2566 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08002567 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568};
2569
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002570static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 .start = softnet_seq_start,
2572 .next = softnet_seq_next,
2573 .stop = softnet_seq_stop,
2574 .show = softnet_seq_show,
2575};
2576
2577static int softnet_seq_open(struct inode *inode, struct file *file)
2578{
2579 return seq_open(file, &softnet_seq_ops);
2580}
2581
Arjan van de Ven9a321442007-02-12 00:55:35 -08002582static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583 .owner = THIS_MODULE,
2584 .open = softnet_seq_open,
2585 .read = seq_read,
2586 .llseek = seq_lseek,
2587 .release = seq_release,
2588};
2589
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002590static void *ptype_get_idx(loff_t pos)
2591{
2592 struct packet_type *pt = NULL;
2593 loff_t i = 0;
2594 int t;
2595
2596 list_for_each_entry_rcu(pt, &ptype_all, list) {
2597 if (i == pos)
2598 return pt;
2599 ++i;
2600 }
2601
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002602 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002603 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2604 if (i == pos)
2605 return pt;
2606 ++i;
2607 }
2608 }
2609 return NULL;
2610}
2611
2612static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002613 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002614{
2615 rcu_read_lock();
2616 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2617}
2618
2619static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2620{
2621 struct packet_type *pt;
2622 struct list_head *nxt;
2623 int hash;
2624
2625 ++*pos;
2626 if (v == SEQ_START_TOKEN)
2627 return ptype_get_idx(0);
2628
2629 pt = v;
2630 nxt = pt->list.next;
2631 if (pt->type == htons(ETH_P_ALL)) {
2632 if (nxt != &ptype_all)
2633 goto found;
2634 hash = 0;
2635 nxt = ptype_base[0].next;
2636 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002637 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002638
2639 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002640 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002641 return NULL;
2642 nxt = ptype_base[hash].next;
2643 }
2644found:
2645 return list_entry(nxt, struct packet_type, list);
2646}
2647
2648static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002649 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002650{
2651 rcu_read_unlock();
2652}
2653
2654static void ptype_seq_decode(struct seq_file *seq, void *sym)
2655{
2656#ifdef CONFIG_KALLSYMS
2657 unsigned long offset = 0, symsize;
2658 const char *symname;
2659 char *modname;
2660 char namebuf[128];
2661
2662 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2663 &modname, namebuf);
2664
2665 if (symname) {
2666 char *delim = ":";
2667
2668 if (!modname)
2669 modname = delim = "";
2670 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2671 symname, offset);
2672 return;
2673 }
2674#endif
2675
2676 seq_printf(seq, "[%p]", sym);
2677}
2678
2679static int ptype_seq_show(struct seq_file *seq, void *v)
2680{
2681 struct packet_type *pt = v;
2682
2683 if (v == SEQ_START_TOKEN)
2684 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002685 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002686 if (pt->type == htons(ETH_P_ALL))
2687 seq_puts(seq, "ALL ");
2688 else
2689 seq_printf(seq, "%04x", ntohs(pt->type));
2690
2691 seq_printf(seq, " %-8s ",
2692 pt->dev ? pt->dev->name : "");
2693 ptype_seq_decode(seq, pt->func);
2694 seq_putc(seq, '\n');
2695 }
2696
2697 return 0;
2698}
2699
2700static const struct seq_operations ptype_seq_ops = {
2701 .start = ptype_seq_start,
2702 .next = ptype_seq_next,
2703 .stop = ptype_seq_stop,
2704 .show = ptype_seq_show,
2705};
2706
2707static int ptype_seq_open(struct inode *inode, struct file *file)
2708{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002709 return seq_open_net(inode, file, &ptype_seq_ops,
2710 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002711}
2712
2713static const struct file_operations ptype_seq_fops = {
2714 .owner = THIS_MODULE,
2715 .open = ptype_seq_open,
2716 .read = seq_read,
2717 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002718 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002719};
2720
2721
Pavel Emelyanov46650792007-10-08 20:38:39 -07002722static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723{
2724 int rc = -ENOMEM;
2725
Eric W. Biederman881d9662007-09-17 11:56:21 -07002726 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002728 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002730 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002731 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002732
Eric W. Biederman881d9662007-09-17 11:56:21 -07002733 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002734 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 rc = 0;
2736out:
2737 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002738out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002739 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002741 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002743 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 goto out;
2745}
Eric W. Biederman881d9662007-09-17 11:56:21 -07002746
Pavel Emelyanov46650792007-10-08 20:38:39 -07002747static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07002748{
2749 wext_proc_exit(net);
2750
2751 proc_net_remove(net, "ptype");
2752 proc_net_remove(net, "softnet_stat");
2753 proc_net_remove(net, "dev");
2754}
2755
Denis V. Lunev022cbae2007-11-13 03:23:50 -08002756static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07002757 .init = dev_proc_net_init,
2758 .exit = dev_proc_net_exit,
2759};
2760
2761static int __init dev_proc_init(void)
2762{
2763 return register_pernet_subsys(&dev_proc_ops);
2764}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765#else
2766#define dev_proc_init() 0
2767#endif /* CONFIG_PROC_FS */
2768
2769
2770/**
2771 * netdev_set_master - set up master/slave pair
2772 * @slave: slave device
2773 * @master: new master device
2774 *
2775 * Changes the master device of the slave. Pass %NULL to break the
2776 * bonding. The caller must hold the RTNL semaphore. On a failure
2777 * a negative errno code is returned. On success the reference counts
2778 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2779 * function returns zero.
2780 */
2781int netdev_set_master(struct net_device *slave, struct net_device *master)
2782{
2783 struct net_device *old = slave->master;
2784
2785 ASSERT_RTNL();
2786
2787 if (master) {
2788 if (old)
2789 return -EBUSY;
2790 dev_hold(master);
2791 }
2792
2793 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002794
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 synchronize_net();
2796
2797 if (old)
2798 dev_put(old);
2799
2800 if (master)
2801 slave->flags |= IFF_SLAVE;
2802 else
2803 slave->flags &= ~IFF_SLAVE;
2804
2805 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2806 return 0;
2807}
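/*
 * Example usage (a minimal sketch): a bonding-style driver enslaves and
 * releases a device under the RTNL lock, which netdev_set_master()
 * asserts it holds.
 */
static int ex_enslave(struct net_device *master, struct net_device *slave)
{
	int err;

	rtnl_lock();
	err = netdev_set_master(slave, master);
	rtnl_unlock();
	return err;
}

static int ex_release(struct net_device *slave)
{
	int err;

	rtnl_lock();
	err = netdev_set_master(slave, NULL);	/* break the bonding */
	rtnl_unlock();
	return err;
}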
2808
Wang Chendad9b332008-06-18 01:48:28 -07002809static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07002810{
2811 unsigned short old_flags = dev->flags;
2812
Patrick McHardy24023452007-07-14 18:51:31 -07002813 ASSERT_RTNL();
2814
Wang Chendad9b332008-06-18 01:48:28 -07002815 dev->flags |= IFF_PROMISC;
2816 dev->promiscuity += inc;
2817 if (dev->promiscuity == 0) {
2818 /*
2819 * Avoid overflow.
 2820 	 * If inc causes overflow, leave promisc untouched and return an error.
2821 */
2822 if (inc < 0)
2823 dev->flags &= ~IFF_PROMISC;
2824 else {
2825 dev->promiscuity -= inc;
2826 printk(KERN_WARNING "%s: promiscuity touches roof, "
2827 "set promiscuity failed, promiscuity feature "
2828 "of device might be broken.\n", dev->name);
2829 return -EOVERFLOW;
2830 }
2831 }
Patrick McHardy4417da62007-06-27 01:28:10 -07002832 if (dev->flags != old_flags) {
2833 printk(KERN_INFO "device %s %s promiscuous mode\n",
2834 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2835 "left");
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05002836 if (audit_enabled)
2837 audit_log(current->audit_context, GFP_ATOMIC,
2838 AUDIT_ANOM_PROMISCUOUS,
2839 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2840 dev->name, (dev->flags & IFF_PROMISC),
2841 (old_flags & IFF_PROMISC),
2842 audit_get_loginuid(current),
2843 current->uid, current->gid,
2844 audit_get_sessionid(current));
Patrick McHardy24023452007-07-14 18:51:31 -07002845
2846 if (dev->change_rx_flags)
2847 dev->change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07002848 }
Wang Chendad9b332008-06-18 01:48:28 -07002849 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07002850}
2851
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852/**
2853 * dev_set_promiscuity - update promiscuity count on a device
2854 * @dev: device
2855 * @inc: modifier
2856 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07002857 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 * remains above zero the interface remains promiscuous. Once it hits zero
 2859 * the device reverts to normal filtering operation. A negative inc
2860 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07002861 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862 */
Wang Chendad9b332008-06-18 01:48:28 -07002863int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864{
2865 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07002866 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867
Wang Chendad9b332008-06-18 01:48:28 -07002868 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07002869 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07002870 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07002871 if (dev->flags != old_flags)
2872 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07002873 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874}
2875
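/*
 * Editor's illustration, not part of the original file: the
 * reference-counted use of dev_set_promiscuity(). Every +1 must be
 * balanced by a later -1; the capture_*_example() names are
 * hypothetical, and the caller must hold RTNL.
 */
static int capture_attach_example(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev_set_promiscuity(dev, 1);	/* may return -EOVERFLOW */
}

static void capture_detach_example(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
}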
2876/**
2877 * dev_set_allmulti - update allmulti count on a device
2878 * @dev: device
2879 * @inc: modifier
2880 *
2881 *	Add or remove reception of all multicast frames on a device. While the
2882 *	count in the device remains above zero the interface remains subscribed
2883 *	to all multicast frames. Once it hits zero the device reverts to normal
2884 * filtering operation. A negative @inc value is used to drop the counter
2885 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07002886 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 */
2888
Wang Chendad9b332008-06-18 01:48:28 -07002889int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890{
2891 unsigned short old_flags = dev->flags;
2892
Patrick McHardy24023452007-07-14 18:51:31 -07002893 ASSERT_RTNL();
2894
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07002896 dev->allmulti += inc;
2897 if (dev->allmulti == 0) {
2898 /*
2899 * Avoid overflow.
2900		 * If inc causes overflow, leave allmulti untouched and return an error.
2901 */
2902 if (inc < 0)
2903 dev->flags &= ~IFF_ALLMULTI;
2904 else {
2905 dev->allmulti -= inc;
2906			printk(KERN_WARNING "%s: allmulti counter overflowed, "
2907			       "cannot set allmulti; the allmulti feature of "
2908			       "this device may be unreliable.\n", dev->name);
2909 return -EOVERFLOW;
2910 }
2911 }
Patrick McHardy24023452007-07-14 18:51:31 -07002912 if (dev->flags ^ old_flags) {
2913 if (dev->change_rx_flags)
2914 dev->change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07002915 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07002916 }
Wang Chendad9b332008-06-18 01:48:28 -07002917 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07002918}
2919
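/*
 * Editor's illustration, not part of the original file: checking the
 * error return of dev_set_allmulti(), e.g. from a multicast routing
 * user. mroute_start_example() is a hypothetical name; RTNL held.
 */
static int mroute_start_example(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();
	err = dev_set_allmulti(dev, 1);
	if (err < 0)
		return err;	/* -EOVERFLOW if the counter would wrap */
	/* ... set up multicast forwarding state here ... */
	return 0;
}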
2920/*
2921 * Upload unicast and multicast address lists to device and
2922 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08002923 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07002924 * are present.
2925 */
2926void __dev_set_rx_mode(struct net_device *dev)
2927{
2928 /* dev_open will call this function so the list will stay sane. */
2929 if (!(dev->flags&IFF_UP))
2930 return;
2931
2932 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09002933 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07002934
2935 if (dev->set_rx_mode)
2936 dev->set_rx_mode(dev);
2937 else {
2938		/* Unicast address changes may only happen under the rtnl,
2939 * therefore calling __dev_set_promiscuity here is safe.
2940 */
2941 if (dev->uc_count > 0 && !dev->uc_promisc) {
2942 __dev_set_promiscuity(dev, 1);
2943 dev->uc_promisc = 1;
2944 } else if (dev->uc_count == 0 && dev->uc_promisc) {
2945 __dev_set_promiscuity(dev, -1);
2946 dev->uc_promisc = 0;
2947 }
2948
2949 if (dev->set_multicast_list)
2950 dev->set_multicast_list(dev);
2951 }
2952}
2953
2954void dev_set_rx_mode(struct net_device *dev)
2955{
2956 netif_tx_lock_bh(dev);
2957 __dev_set_rx_mode(dev);
2958 netif_tx_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959}
2960
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002961int __dev_addr_delete(struct dev_addr_list **list, int *count,
2962 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07002963{
2964 struct dev_addr_list *da;
2965
2966 for (; (da = *list) != NULL; list = &da->next) {
2967 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2968 alen == da->da_addrlen) {
2969 if (glbl) {
2970 int old_glbl = da->da_gusers;
2971 da->da_gusers = 0;
2972 if (old_glbl == 0)
2973 break;
2974 }
2975 if (--da->da_users)
2976 return 0;
2977
2978 *list = da->next;
2979 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002980 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07002981 return 0;
2982 }
2983 }
2984 return -ENOENT;
2985}
2986
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07002987int __dev_addr_add(struct dev_addr_list **list, int *count,
2988 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07002989{
2990 struct dev_addr_list *da;
2991
2992 for (da = *list; da != NULL; da = da->next) {
2993 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2994 da->da_addrlen == alen) {
2995 if (glbl) {
2996 int old_glbl = da->da_gusers;
2997 da->da_gusers = 1;
2998 if (old_glbl)
2999 return 0;
3000 }
3001 da->da_users++;
3002 return 0;
3003 }
3004 }
3005
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003006 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003007 if (da == NULL)
3008 return -ENOMEM;
3009 memcpy(da->da_addr, addr, alen);
3010 da->da_addrlen = alen;
3011 da->da_users = 1;
3012 da->da_gusers = glbl ? 1 : 0;
3013 da->next = *list;
3014 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003015 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003016 return 0;
3017}
3018
Patrick McHardy4417da62007-06-27 01:28:10 -07003019/**
3020 * dev_unicast_delete - Release secondary unicast address.
3021 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003022 * @addr: address to delete
3023 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003024 *
3025 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003026 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003027 *
3028 * The caller must hold the rtnl_mutex.
3029 */
3030int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3031{
3032 int err;
3033
3034 ASSERT_RTNL();
3035
3036 netif_tx_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003037 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3038 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003039 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003040 netif_tx_unlock_bh(dev);
3041 return err;
3042}
3043EXPORT_SYMBOL(dev_unicast_delete);
3044
3045/**
3046 * dev_unicast_add - add a secondary unicast address
3047 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003048 * @addr: address to add
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003049 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003050 *
3051 * Add a secondary unicast address to the device or increase
3052 * the reference count if it already exists.
3053 *
3054 * The caller must hold the rtnl_mutex.
3055 */
3056int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3057{
3058 int err;
3059
3060 ASSERT_RTNL();
3061
3062 netif_tx_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003063 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3064 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003065 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003066 netif_tx_unlock_bh(dev);
3067 return err;
3068}
3069EXPORT_SYMBOL(dev_unicast_add);
3070
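/*
 * Editor's illustration, not part of the original file: adding and
 * releasing a secondary unicast address with the helpers above. The
 * locally administered address and secondary_mac_example() are
 * hypothetical; RTNL must be held, as both helpers assert.
 */
static int secondary_mac_example(struct net_device *dev)
{
	static u8 extra_mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	int err;

	err = dev_unicast_add(dev, extra_mac, ETH_ALEN);
	if (err)
		return err;
	/* ... use the address, then drop the reference again ... */
	return dev_unicast_delete(dev, extra_mac, ETH_ALEN);
}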
Chris Leeche83a2ea2008-01-31 16:53:23 -08003071int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3072 struct dev_addr_list **from, int *from_count)
3073{
3074 struct dev_addr_list *da, *next;
3075 int err = 0;
3076
3077 da = *from;
3078 while (da != NULL) {
3079 next = da->next;
3080 if (!da->da_synced) {
3081 err = __dev_addr_add(to, to_count,
3082 da->da_addr, da->da_addrlen, 0);
3083 if (err < 0)
3084 break;
3085 da->da_synced = 1;
3086 da->da_users++;
3087 } else if (da->da_users == 1) {
3088 __dev_addr_delete(to, to_count,
3089 da->da_addr, da->da_addrlen, 0);
3090 __dev_addr_delete(from, from_count,
3091 da->da_addr, da->da_addrlen, 0);
3092 }
3093 da = next;
3094 }
3095 return err;
3096}
3097
3098void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3099 struct dev_addr_list **from, int *from_count)
3100{
3101 struct dev_addr_list *da, *next;
3102
3103 da = *from;
3104 while (da != NULL) {
3105 next = da->next;
3106 if (da->da_synced) {
3107 __dev_addr_delete(to, to_count,
3108 da->da_addr, da->da_addrlen, 0);
3109 da->da_synced = 0;
3110 __dev_addr_delete(from, from_count,
3111 da->da_addr, da->da_addrlen, 0);
3112 }
3113 da = next;
3114 }
3115}
3116
3117/**
3118 * dev_unicast_sync - Synchronize device's unicast list to another device
3119 * @to: destination device
3120 * @from: source device
3121 *
3122 * Add newly added addresses to the destination device and release
3123 * addresses that have no users left. The source device must be
3124 * locked by netif_tx_lock_bh.
3125 *
3126 * This function is intended to be called from the dev->set_rx_mode
3127 * function of layered software devices.
3128 */
3129int dev_unicast_sync(struct net_device *to, struct net_device *from)
3130{
3131 int err = 0;
3132
3133 netif_tx_lock_bh(to);
3134 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3135 &from->uc_list, &from->uc_count);
3136 if (!err)
3137 __dev_set_rx_mode(to);
3138 netif_tx_unlock_bh(to);
3139 return err;
3140}
3141EXPORT_SYMBOL(dev_unicast_sync);
3142
3143/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003144 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003145 * @to: destination device
3146 * @from: source device
3147 *
3148 * Remove all addresses that were added to the destination device by
3149 * dev_unicast_sync(). This function is intended to be called from the
3150 * dev->stop function of layered software devices.
3151 */
3152void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3153{
3154 netif_tx_lock_bh(from);
3155 netif_tx_lock_bh(to);
3156
3157 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3158 &from->uc_list, &from->uc_count);
3159 __dev_set_rx_mode(to);
3160
3161 netif_tx_unlock_bh(to);
3162 netif_tx_unlock_bh(from);
3163}
3164EXPORT_SYMBOL(dev_unicast_unsync);
3165
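/*
 * Editor's illustration, not part of the original file: how a layered
 * driver could wire dev_unicast_sync()/dev_unicast_unsync() into its
 * rx-mode and stop paths. Keeping the lower device pointer in the
 * private area is a hypothetical arrangement.
 */
struct upper_priv_example {
	struct net_device *lower;
};

static void upper_set_rx_mode_example(struct net_device *upper)
{
	struct upper_priv_example *p = netdev_priv(upper);

	/* Invoked via dev_set_rx_mode(), so upper's TX lock is held. */
	dev_unicast_sync(p->lower, upper);
}

static int upper_stop_example(struct net_device *upper)
{
	struct upper_priv_example *p = netdev_priv(upper);

	dev_unicast_unsync(p->lower, upper);
	return 0;
}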
Denis Cheng12972622007-07-18 02:12:56 -07003166static void __dev_addr_discard(struct dev_addr_list **list)
3167{
3168 struct dev_addr_list *tmp;
3169
3170 while (*list != NULL) {
3171 tmp = *list;
3172 *list = tmp->next;
3173 if (tmp->da_users > tmp->da_gusers)
3174			printk(KERN_WARNING "__dev_addr_discard: address leakage! "
3175 "da_users=%d\n", tmp->da_users);
3176 kfree(tmp);
3177 }
3178}
3179
Denis Cheng26cc2522007-07-18 02:12:03 -07003180static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07003181{
3182 netif_tx_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07003183
Patrick McHardy4417da62007-06-27 01:28:10 -07003184 __dev_addr_discard(&dev->uc_list);
3185 dev->uc_count = 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003186
Denis Cheng456ad752007-07-18 02:10:54 -07003187 __dev_addr_discard(&dev->mc_list);
3188 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07003189
Denis Cheng456ad752007-07-18 02:10:54 -07003190 netif_tx_unlock_bh(dev);
3191}
3192
Linus Torvalds1da177e2005-04-16 15:20:36 -07003193unsigned dev_get_flags(const struct net_device *dev)
3194{
3195 unsigned flags;
3196
3197 flags = (dev->flags & ~(IFF_PROMISC |
3198 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08003199 IFF_RUNNING |
3200 IFF_LOWER_UP |
3201 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07003202 (dev->gflags & (IFF_PROMISC |
3203 IFF_ALLMULTI));
3204
Stefan Rompfb00055a2006-03-20 17:09:11 -08003205 if (netif_running(dev)) {
3206 if (netif_oper_up(dev))
3207 flags |= IFF_RUNNING;
3208 if (netif_carrier_ok(dev))
3209 flags |= IFF_LOWER_UP;
3210 if (netif_dormant(dev))
3211 flags |= IFF_DORMANT;
3212 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213
3214 return flags;
3215}
3216
3217int dev_change_flags(struct net_device *dev, unsigned flags)
3218{
Thomas Graf7c355f52007-06-05 16:03:03 -07003219 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003220 int old_flags = dev->flags;
3221
Patrick McHardy24023452007-07-14 18:51:31 -07003222 ASSERT_RTNL();
3223
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224 /*
3225 * Set the flags on our device.
3226 */
3227
3228 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3229 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3230 IFF_AUTOMEDIA)) |
3231 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3232 IFF_ALLMULTI));
3233
3234 /*
3235 * Load in the correct multicast list now the flags have changed.
3236 */
3237
David Woodhouse0e917962008-05-20 14:36:14 -07003238 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
Patrick McHardy24023452007-07-14 18:51:31 -07003239 dev->change_rx_flags(dev, IFF_MULTICAST);
3240
Patrick McHardy4417da62007-06-27 01:28:10 -07003241 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242
3243 /*
3244	 * Have we downed the interface? We handle IFF_UP ourselves
3245 * according to user attempts to set it, rather than blindly
3246 * setting it.
3247 */
3248
3249 ret = 0;
3250 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3251 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3252
3253 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07003254 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255 }
3256
3257 if (dev->flags & IFF_UP &&
3258 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3259 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003260 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003261
3262 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3263 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3264 dev->gflags ^= IFF_PROMISC;
3265 dev_set_promiscuity(dev, inc);
3266 }
3267
3268 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3269	   is important. Some (broken) drivers set IFF_PROMISC when
3270	   IFF_ALLMULTI is requested, without asking us and without reporting it.
3271 */
3272 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3273 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3274 dev->gflags ^= IFF_ALLMULTI;
3275 dev_set_allmulti(dev, inc);
3276 }
3277
Thomas Graf7c355f52007-06-05 16:03:03 -07003278 /* Exclude state transition flags, already notified */
3279 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3280 if (changes)
3281 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282
3283 return ret;
3284}
3285
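/*
 * Editor's illustration, not part of the original file: toggling
 * IFF_UP through dev_change_flags() while preserving the remaining
 * flag bits. if_up_example() is a hypothetical helper; RTNL held.
 */
static int if_up_example(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev_change_flags(dev, dev->flags | IFF_UP);
}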
3286int dev_set_mtu(struct net_device *dev, int new_mtu)
3287{
3288 int err;
3289
3290 if (new_mtu == dev->mtu)
3291 return 0;
3292
3293 /* MTU must be positive. */
3294 if (new_mtu < 0)
3295 return -EINVAL;
3296
3297 if (!netif_device_present(dev))
3298 return -ENODEV;
3299
3300 err = 0;
3301 if (dev->change_mtu)
3302 err = dev->change_mtu(dev, new_mtu);
3303 else
3304 dev->mtu = new_mtu;
3305 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003306 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307 return err;
3308}
3309
3310int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3311{
3312 int err;
3313
3314 if (!dev->set_mac_address)
3315 return -EOPNOTSUPP;
3316 if (sa->sa_family != dev->type)
3317 return -EINVAL;
3318 if (!netif_device_present(dev))
3319 return -ENODEV;
3320 err = dev->set_mac_address(dev, sa);
3321 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003322 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323 return err;
3324}
3325
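/*
 * Editor's illustration, not part of the original file: the
 * kernel-side calls behind SIOCSIFMTU and SIOCSIFHWADDR below. The
 * MTU value and the locally administered MAC are hypothetical;
 * the caller holds RTNL.
 */
static int reconfigure_example(struct net_device *dev)
{
	struct sockaddr sa;
	int err;

	ASSERT_RTNL();
	err = dev_set_mtu(dev, 1500);
	if (err)
		return err;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = dev->type;	/* must match the device type */
	sa.sa_data[0] = 0x02;		/* locally administered bit */
	sa.sa_data[5] = 0x01;
	return dev_set_mac_address(dev, &sa);
}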
3326/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07003327 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07003329static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330{
3331 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003332 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333
3334 if (!dev)
3335 return -ENODEV;
3336
3337 switch (cmd) {
3338 case SIOCGIFFLAGS: /* Get interface flags */
3339 ifr->ifr_flags = dev_get_flags(dev);
3340 return 0;
3341
Linus Torvalds1da177e2005-04-16 15:20:36 -07003342 case SIOCGIFMETRIC: /* Get the metric on the interface
3343 (currently unused) */
3344 ifr->ifr_metric = 0;
3345 return 0;
3346
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347 case SIOCGIFMTU: /* Get the MTU of a device */
3348 ifr->ifr_mtu = dev->mtu;
3349 return 0;
3350
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351 case SIOCGIFHWADDR:
3352 if (!dev->addr_len)
3353 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3354 else
3355 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3356 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3357 ifr->ifr_hwaddr.sa_family = dev->type;
3358 return 0;
3359
Jeff Garzik14e3e072007-10-08 00:06:32 -07003360 case SIOCGIFSLAVE:
3361 err = -EINVAL;
3362 break;
3363
3364 case SIOCGIFMAP:
3365 ifr->ifr_map.mem_start = dev->mem_start;
3366 ifr->ifr_map.mem_end = dev->mem_end;
3367 ifr->ifr_map.base_addr = dev->base_addr;
3368 ifr->ifr_map.irq = dev->irq;
3369 ifr->ifr_map.dma = dev->dma;
3370 ifr->ifr_map.port = dev->if_port;
3371 return 0;
3372
3373 case SIOCGIFINDEX:
3374 ifr->ifr_ifindex = dev->ifindex;
3375 return 0;
3376
3377 case SIOCGIFTXQLEN:
3378 ifr->ifr_qlen = dev->tx_queue_len;
3379 return 0;
3380
3381 default:
3382 /* dev_ioctl() should ensure this case
3383 * is never reached
3384 */
3385 WARN_ON(1);
3386 err = -EINVAL;
3387 break;
3388
3389 }
3390 return err;
3391}
3392
3393/*
3394 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3395 */
3396static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3397{
3398 int err;
3399 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3400
3401 if (!dev)
3402 return -ENODEV;
3403
3404 switch (cmd) {
3405 case SIOCSIFFLAGS: /* Set interface flags */
3406 return dev_change_flags(dev, ifr->ifr_flags);
3407
3408 case SIOCSIFMETRIC: /* Set the metric on the interface
3409 (currently unused) */
3410 return -EOPNOTSUPP;
3411
3412 case SIOCSIFMTU: /* Set the MTU of a device */
3413 return dev_set_mtu(dev, ifr->ifr_mtu);
3414
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 case SIOCSIFHWADDR:
3416 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3417
3418 case SIOCSIFHWBROADCAST:
3419 if (ifr->ifr_hwaddr.sa_family != dev->type)
3420 return -EINVAL;
3421 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3422 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003423 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 return 0;
3425
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426 case SIOCSIFMAP:
3427 if (dev->set_config) {
3428 if (!netif_device_present(dev))
3429 return -ENODEV;
3430 return dev->set_config(dev, &ifr->ifr_map);
3431 }
3432 return -EOPNOTSUPP;
3433
3434 case SIOCADDMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003435 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3437 return -EINVAL;
3438 if (!netif_device_present(dev))
3439 return -ENODEV;
3440 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3441 dev->addr_len, 1);
3442
3443 case SIOCDELMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003444 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3446 return -EINVAL;
3447 if (!netif_device_present(dev))
3448 return -ENODEV;
3449 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3450 dev->addr_len, 1);
3451
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452 case SIOCSIFTXQLEN:
3453 if (ifr->ifr_qlen < 0)
3454 return -EINVAL;
3455 dev->tx_queue_len = ifr->ifr_qlen;
3456 return 0;
3457
3458 case SIOCSIFNAME:
3459 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3460 return dev_change_name(dev, ifr->ifr_newname);
3461
3462 /*
3463 * Unknown or private ioctl
3464 */
3465
3466 default:
3467 if ((cmd >= SIOCDEVPRIVATE &&
3468 cmd <= SIOCDEVPRIVATE + 15) ||
3469 cmd == SIOCBONDENSLAVE ||
3470 cmd == SIOCBONDRELEASE ||
3471 cmd == SIOCBONDSETHWADDR ||
3472 cmd == SIOCBONDSLAVEINFOQUERY ||
3473 cmd == SIOCBONDINFOQUERY ||
3474 cmd == SIOCBONDCHANGEACTIVE ||
3475 cmd == SIOCGMIIPHY ||
3476 cmd == SIOCGMIIREG ||
3477 cmd == SIOCSMIIREG ||
3478 cmd == SIOCBRADDIF ||
3479 cmd == SIOCBRDELIF ||
3480 cmd == SIOCWANDEV) {
3481 err = -EOPNOTSUPP;
3482 if (dev->do_ioctl) {
3483 if (netif_device_present(dev))
3484 err = dev->do_ioctl(dev, ifr,
3485 cmd);
3486 else
3487 err = -ENODEV;
3488 }
3489 } else
3490 err = -EINVAL;
3491
3492 }
3493 return err;
3494}
3495
3496/*
3497 * This function handles all "interface"-type I/O control requests. The actual
3498 * 'doing' part of this is dev_ifsioc above.
3499 */
3500
3501/**
3502 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003503 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504 * @cmd: command to issue
3505 * @arg: pointer to a struct ifreq in user space
3506 *
3507 * Issue ioctl functions to devices. This is normally called by the
3508 * user space syscall interfaces but can sometimes be useful for
3509 * other purposes. The return value is the return from the syscall if
3510 * positive or a negative errno code on error.
3511 */
3512
Eric W. Biederman881d9662007-09-17 11:56:21 -07003513int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514{
3515 struct ifreq ifr;
3516 int ret;
3517 char *colon;
3518
3519 /* One special case: SIOCGIFCONF takes ifconf argument
3520 and requires shared lock, because it sleeps writing
3521 to user space.
3522 */
3523
3524 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003525 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003526 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003527 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528 return ret;
3529 }
3530 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003531 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532
3533 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3534 return -EFAULT;
3535
3536 ifr.ifr_name[IFNAMSIZ-1] = 0;
3537
3538 colon = strchr(ifr.ifr_name, ':');
3539 if (colon)
3540 *colon = 0;
3541
3542 /*
3543 * See which interface the caller is talking about.
3544 */
3545
3546 switch (cmd) {
3547 /*
3548 * These ioctl calls:
3549 * - can be done by all.
3550 * - atomic and do not require locking.
3551 * - return a value
3552 */
3553 case SIOCGIFFLAGS:
3554 case SIOCGIFMETRIC:
3555 case SIOCGIFMTU:
3556 case SIOCGIFHWADDR:
3557 case SIOCGIFSLAVE:
3558 case SIOCGIFMAP:
3559 case SIOCGIFINDEX:
3560 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003561 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562 read_lock(&dev_base_lock);
Jeff Garzik14e3e072007-10-08 00:06:32 -07003563 ret = dev_ifsioc_locked(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564 read_unlock(&dev_base_lock);
3565 if (!ret) {
3566 if (colon)
3567 *colon = ':';
3568 if (copy_to_user(arg, &ifr,
3569 sizeof(struct ifreq)))
3570 ret = -EFAULT;
3571 }
3572 return ret;
3573
3574 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003575 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003577 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578 rtnl_unlock();
3579 if (!ret) {
3580 if (colon)
3581 *colon = ':';
3582 if (copy_to_user(arg, &ifr,
3583 sizeof(struct ifreq)))
3584 ret = -EFAULT;
3585 }
3586 return ret;
3587
3588 /*
3589 * These ioctl calls:
3590 * - require superuser power.
3591 * - require strict serialization.
3592 * - return a value
3593 */
3594 case SIOCGMIIPHY:
3595 case SIOCGMIIREG:
3596 case SIOCSIFNAME:
3597 if (!capable(CAP_NET_ADMIN))
3598 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003599 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003601 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 rtnl_unlock();
3603 if (!ret) {
3604 if (colon)
3605 *colon = ':';
3606 if (copy_to_user(arg, &ifr,
3607 sizeof(struct ifreq)))
3608 ret = -EFAULT;
3609 }
3610 return ret;
3611
3612 /*
3613 * These ioctl calls:
3614 * - require superuser power.
3615 * - require strict serialization.
3616 * - do not return a value
3617 */
3618 case SIOCSIFFLAGS:
3619 case SIOCSIFMETRIC:
3620 case SIOCSIFMTU:
3621 case SIOCSIFMAP:
3622 case SIOCSIFHWADDR:
3623 case SIOCSIFSLAVE:
3624 case SIOCADDMULTI:
3625 case SIOCDELMULTI:
3626 case SIOCSIFHWBROADCAST:
3627 case SIOCSIFTXQLEN:
3628 case SIOCSMIIREG:
3629 case SIOCBONDENSLAVE:
3630 case SIOCBONDRELEASE:
3631 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632 case SIOCBONDCHANGEACTIVE:
3633 case SIOCBRADDIF:
3634 case SIOCBRDELIF:
3635 if (!capable(CAP_NET_ADMIN))
3636 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08003637 /* fall through */
3638 case SIOCBONDSLAVEINFOQUERY:
3639 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003640 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003642 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643 rtnl_unlock();
3644 return ret;
3645
3646 case SIOCGIFMEM:
3647 /* Get the per device memory space. We can add this but
3648 * currently do not support it */
3649 case SIOCSIFMEM:
3650 /* Set the per device memory buffer space.
3651 * Not applicable in our case */
3652 case SIOCSIFLINK:
3653 return -EINVAL;
3654
3655 /*
3656 * Unknown or private ioctl.
3657 */
3658 default:
3659 if (cmd == SIOCWANDEV ||
3660 (cmd >= SIOCDEVPRIVATE &&
3661 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003662 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003663 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003664 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003665 rtnl_unlock();
3666 if (!ret && copy_to_user(arg, &ifr,
3667 sizeof(struct ifreq)))
3668 ret = -EFAULT;
3669 return ret;
3670 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003671 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07003672 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003673 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674 return -EINVAL;
3675 }
3676}
3677
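/*
 * Editor's illustration, not part of the original file: the user-space
 * view of the ioctl path implemented above, querying an MTU with
 * SIOCGIFMTU. Shown inside a comment since it is application code,
 * not kernel code; get_mtu_example() is a hypothetical helper.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *
 *	int get_mtu_example(int sock, const char *name)
 *	{
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		if (ioctl(sock, SIOCGIFMTU, &ifr) < 0)
 *			return -1;
 *		return ifr.ifr_mtu;
 *	}
 */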
3678
3679/**
3680 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003681 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682 *
3683 * Returns a suitable unique value for a new device interface
3684 * number. The caller must hold the rtnl semaphore or the
3685 * dev_base_lock to be sure it remains unique.
3686 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003687static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688{
3689 static int ifindex;
3690 for (;;) {
3691 if (++ifindex <= 0)
3692 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003693 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694 return ifindex;
3695 }
3696}
3697
Linus Torvalds1da177e2005-04-16 15:20:36 -07003698/* Delayed registration/unregistration */
3699static DEFINE_SPINLOCK(net_todo_list_lock);
Denis Cheng3b5b34f2007-12-07 00:49:17 -08003700static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701
Stephen Hemminger6f05f622007-03-08 20:46:03 -08003702static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703{
3704 spin_lock(&net_todo_list_lock);
3705 list_add_tail(&dev->todo_list, &net_todo_list);
3706 spin_unlock(&net_todo_list_lock);
3707}
3708
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003709static void rollback_registered(struct net_device *dev)
3710{
3711 BUG_ON(dev_boot_phase);
3712 ASSERT_RTNL();
3713
3714	/* Some devices call this without ever registering, to unwind a failed initialization. */
3715 if (dev->reg_state == NETREG_UNINITIALIZED) {
3716 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3717 "was registered\n", dev->name, dev);
3718
3719 WARN_ON(1);
3720 return;
3721 }
3722
3723 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3724
3725 /* If device is running, close it first. */
3726 dev_close(dev);
3727
3728 /* And unlink it from device chain. */
3729 unlist_netdevice(dev);
3730
3731 dev->reg_state = NETREG_UNREGISTERING;
3732
3733 synchronize_net();
3734
3735 /* Shutdown queueing discipline. */
3736 dev_shutdown(dev);
3737
3738
3739	/* Notify protocols that we are about to destroy
3740	   this device. They should clean up all their state.
3741 */
3742 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3743
3744 /*
3745 * Flush the unicast and multicast chains
3746 */
3747 dev_addr_discard(dev);
3748
3749 if (dev->uninit)
3750 dev->uninit(dev);
3751
3752 /* Notifier chain MUST detach us from master device. */
3753 BUG_TRAP(!dev->master);
3754
3755 /* Remove entries from kobject tree */
3756 netdev_unregister_kobject(dev);
3757
3758 synchronize_net();
3759
3760 dev_put(dev);
3761}
3762
Linus Torvalds1da177e2005-04-16 15:20:36 -07003763/**
3764 * register_netdevice - register a network device
3765 * @dev: device to register
3766 *
3767 * Take a completed network device structure and add it to the kernel
3768 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3769 * chain. 0 is returned on success. A negative errno code is returned
3770 * on a failure to set up the device, or if the name is a duplicate.
3771 *
3772 * Callers must hold the rtnl semaphore. You may want
3773 * register_netdev() instead of this.
3774 *
3775 * BUGS:
3776 * The locking appears insufficient to guarantee two parallel registers
3777 * will not get the same name.
3778 */
3779
3780int register_netdevice(struct net_device *dev)
3781{
3782 struct hlist_head *head;
3783 struct hlist_node *p;
3784 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003785 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786
3787 BUG_ON(dev_boot_phase);
3788 ASSERT_RTNL();
3789
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003790 might_sleep();
3791
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792	/* When net_device structures are persistent, this will be fatal. */
3793 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003794 BUG_ON(!dev_net(dev));
3795 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796
Herbert Xu932ff272006-06-09 12:20:56 -07003797 spin_lock_init(&dev->_xmit_lock);
Jarek Poplawski723e98b2007-05-15 22:46:18 -07003798 netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799 dev->xmit_lock_owner = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003800
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801 dev->iflink = -1;
3802
3803 /* Init, if this function is available */
3804 if (dev->init) {
3805 ret = dev->init(dev);
3806 if (ret) {
3807 if (ret > 0)
3808 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08003809 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810 }
3811 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003812
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813 if (!dev_valid_name(dev->name)) {
3814 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003815 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816 }
3817
Eric W. Biederman881d9662007-09-17 11:56:21 -07003818 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819 if (dev->iflink == -1)
3820 dev->iflink = dev->ifindex;
3821
3822 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003823 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824 hlist_for_each(p, head) {
3825 struct net_device *d
3826 = hlist_entry(p, struct net_device, name_hlist);
3827 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3828 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003829 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003831 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832
Stephen Hemmingerd212f872007-06-27 00:47:37 -07003833 /* Fix illegal checksum combinations */
3834 if ((dev->features & NETIF_F_HW_CSUM) &&
3835 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3836 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3837 dev->name);
3838 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3839 }
3840
3841 if ((dev->features & NETIF_F_NO_CSUM) &&
3842 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3843 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3844 dev->name);
3845 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3846 }
3847
3848
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849 /* Fix illegal SG+CSUM combinations. */
3850 if ((dev->features & NETIF_F_SG) &&
Herbert Xu8648b302006-06-17 22:06:05 -07003851 !(dev->features & NETIF_F_ALL_CSUM)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003852 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003853 dev->name);
3854 dev->features &= ~NETIF_F_SG;
3855 }
3856
3857 /* TSO requires that SG is present as well. */
3858 if ((dev->features & NETIF_F_TSO) &&
3859 !(dev->features & NETIF_F_SG)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003860 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861 dev->name);
3862 dev->features &= ~NETIF_F_TSO;
3863 }
Ananda Rajue89e9cf2005-10-18 15:46:41 -07003864 if (dev->features & NETIF_F_UFO) {
3865 if (!(dev->features & NETIF_F_HW_CSUM)) {
3866 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3867 "NETIF_F_HW_CSUM feature.\n",
3868 dev->name);
3869 dev->features &= ~NETIF_F_UFO;
3870 }
3871 if (!(dev->features & NETIF_F_SG)) {
3872 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3873 "NETIF_F_SG feature.\n",
3874 dev->name);
3875 dev->features &= ~NETIF_F_UFO;
3876 }
3877 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07003879 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07003880 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003881 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003882 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003883 dev->reg_state = NETREG_REGISTERED;
3884
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885 /*
3886 * Default initial state at registry is that the
3887 * device is present.
3888 */
3889
3890 set_bit(__LINK_STATE_PRESENT, &dev->state);
3891
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02003894 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003895
3896	/* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003897 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07003898 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003899 if (ret) {
3900 rollback_registered(dev);
3901 dev->reg_state = NETREG_UNREGISTERED;
3902 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003903
3904out:
3905 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003906
3907err_uninit:
3908 if (dev->uninit)
3909 dev->uninit(dev);
3910 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003911}
3912
3913/**
3914 * register_netdev - register a network device
3915 * @dev: device to register
3916 *
3917 * Take a completed network device structure and add it to the kernel
3918 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3919 * chain. 0 is returned on success. A negative errno code is returned
3920 * on a failure to set up the device, or if the name is a duplicate.
3921 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07003922 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923 * and expands the device name if you passed a format string to
3924 * alloc_netdev.
3925 */
3926int register_netdev(struct net_device *dev)
3927{
3928 int err;
3929
3930 rtnl_lock();
3931
3932 /*
3933 * If the name is a format string the caller wants us to do a
3934 * name allocation.
3935 */
3936 if (strchr(dev->name, '%')) {
3937 err = dev_alloc_name(dev, dev->name);
3938 if (err < 0)
3939 goto out;
3940 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003941
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942 err = register_netdevice(dev);
3943out:
3944 rtnl_unlock();
3945 return err;
3946}
3947EXPORT_SYMBOL(register_netdev);
3948
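/*
 * Editor's illustration, not part of the original file: the canonical
 * allocate/register/unregister/free lifecycle built on the interfaces
 * above. The "ex%d" name and example_* identifiers are hypothetical.
 */
static struct net_device *example_dev;

static int __init example_init(void)
{
	int err;

	example_dev = alloc_netdev(0, "ex%d", ether_setup);
	if (!example_dev)
		return -ENOMEM;

	err = register_netdev(example_dev);	/* takes RTNL itself */
	if (err)
		free_netdev(example_dev);
	return err;
}

static void __exit example_exit(void)
{
	unregister_netdev(example_dev);
	free_netdev(example_dev);	/* only after unregistration */
}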
3949/*
3950 * netdev_wait_allrefs - wait until all references are gone.
3951 *
3952 * This is called when unregistering network devices.
3953 *
3954 * Any protocol or device that holds a reference should register
3955 * for netdevice notification, and clean up and put back the
3956 * reference if it receives an UNREGISTER event.
3957 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003958 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003959 */
3960static void netdev_wait_allrefs(struct net_device *dev)
3961{
3962 unsigned long rebroadcast_time, warning_time;
3963
3964 rebroadcast_time = warning_time = jiffies;
3965 while (atomic_read(&dev->refcnt) != 0) {
3966 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003967 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968
3969 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003970 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971
3972 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
3973 &dev->state)) {
3974 /* We must not have linkwatch events
3975 * pending on unregister. If this
3976 * happens, we simply run the queue
3977 * unscheduled, resulting in a noop
3978 * for this device.
3979 */
3980 linkwatch_run_queue();
3981 }
3982
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003983 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984
3985 rebroadcast_time = jiffies;
3986 }
3987
3988 msleep(250);
3989
3990 if (time_after(jiffies, warning_time + 10 * HZ)) {
3991 printk(KERN_EMERG "unregister_netdevice: "
3992 "waiting for %s to become free. Usage "
3993 "count = %d\n",
3994 dev->name, atomic_read(&dev->refcnt));
3995 warning_time = jiffies;
3996 }
3997 }
3998}
3999
4000/* The sequence is:
4001 *
4002 * rtnl_lock();
4003 * ...
4004 * register_netdevice(x1);
4005 * register_netdevice(x2);
4006 * ...
4007 * unregister_netdevice(y1);
4008 * unregister_netdevice(y2);
4009 * ...
4010 * rtnl_unlock();
4011 * free_netdev(y1);
4012 * free_netdev(y2);
4013 *
4014 * We are invoked by rtnl_unlock() after it drops the semaphore.
4015 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004016 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017 * without deadlocking with linkwatch via keventd.
4018 * 2) Since we run with the RTNL semaphore not held, we can sleep
4019 * safely in order to wait for the netdev refcnt to drop to zero.
4020 */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004021static DEFINE_MUTEX(net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004022void netdev_run_todo(void)
4023{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004024 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025
4026 /* Need to guard against multiple cpu's getting out of order. */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004027 mutex_lock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028
4029 /* Not safe to do outside the semaphore. We must not return
4030 * until all unregister events invoked by the local processor
4031 * have been completed (either by this todo run, or one on
4032 * another cpu).
4033 */
4034 if (list_empty(&net_todo_list))
4035 goto out;
4036
4037 /* Snapshot list, allow later requests */
4038 spin_lock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004039 list_replace_init(&net_todo_list, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004040 spin_unlock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004041
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042 while (!list_empty(&list)) {
4043 struct net_device *dev
4044 = list_entry(list.next, struct net_device, todo_list);
4045 list_del(&dev->todo_list);
4046
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004047 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004048 printk(KERN_ERR "network todo '%s' but state %d\n",
4049 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004050 dump_stack();
4051 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004053
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004054 dev->reg_state = NETREG_UNREGISTERED;
4055
4056 netdev_wait_allrefs(dev);
4057
4058 /* paranoia */
4059 BUG_ON(atomic_read(&dev->refcnt));
4060 BUG_TRAP(!dev->ip_ptr);
4061 BUG_TRAP(!dev->ip6_ptr);
4062 BUG_TRAP(!dev->dn_ptr);
4063
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004064 if (dev->destructor)
4065 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07004066
4067 /* Free network device */
4068 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004069 }
4070
4071out:
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004072 mutex_unlock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073}
4074
Rusty Russell5a1b5892007-04-28 21:04:03 -07004075static struct net_device_stats *internal_stats(struct net_device *dev)
Rusty Russellc45d2862007-03-28 14:29:08 -07004076{
Rusty Russell5a1b5892007-04-28 21:04:03 -07004077 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07004078}
4079
David S. Millerdc2b4842008-07-08 17:18:23 -07004080static void netdev_init_one_queue(struct net_device *dev,
4081 struct netdev_queue *queue)
4082{
4083 spin_lock_init(&queue->lock);
4084 queue->dev = dev;
4085}
4086
David S. Millerbb949fb2008-07-08 16:55:56 -07004087static void netdev_init_queues(struct net_device *dev)
4088{
David S. Millerdc2b4842008-07-08 17:18:23 -07004089 netdev_init_one_queue(dev, &dev->rx_queue);
4090 netdev_init_one_queue(dev, &dev->tx_queue);
David S. Millerbb949fb2008-07-08 16:55:56 -07004091}
4092
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004094 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095 * @sizeof_priv: size of private data to allocate space for
4096 * @name: device name format string
4097 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004098 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099 *
4100 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004101 * and performs basic initialization. Also allocates subqueue structs
4102 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004104struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4105 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106{
4107 void *p;
4108 struct net_device *dev;
4109 int alloc_size;
4110
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004111 BUG_ON(strlen(name) >= sizeof(dev->name));
4112
Alexey Dobriyand1643d22008-04-18 15:43:32 -07004113 alloc_size = sizeof(struct net_device) +
4114 sizeof(struct net_device_subqueue) * (queue_count - 1);
4115 if (sizeof_priv) {
4116 /* ensure 32-byte alignment of private area */
4117 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4118 alloc_size += sizeof_priv;
4119 }
4120 /* ensure 32-byte alignment of whole construct */
4121 alloc_size += NETDEV_ALIGN_CONST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004122
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07004123 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004124 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004125 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004126 return NULL;
4127 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128
4129 dev = (struct net_device *)
4130 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4131 dev->padded = (char *)dev - (char *)p;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004132 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004134 if (sizeof_priv) {
4135 dev->priv = ((char *)dev +
4136 ((sizeof(struct net_device) +
4137 (sizeof(struct net_device_subqueue) *
Patrick McHardy31ce72a2007-07-20 19:45:45 -07004138 (queue_count - 1)) + NETDEV_ALIGN_CONST)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004139 & ~NETDEV_ALIGN_CONST));
4140 }
4141
4142 dev->egress_subqueue_count = queue_count;
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07004143 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144
David S. Millerbb949fb2008-07-08 16:55:56 -07004145 netdev_init_queues(dev);
4146
Rusty Russell5a1b5892007-04-28 21:04:03 -07004147 dev->get_stats = internal_stats;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004148 netpoll_netdev_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004149 setup(dev);
4150 strcpy(dev->name, name);
4151 return dev;
4152}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004153EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154
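/*
 * Editor's illustration, not part of the original file: allocating a
 * multiqueue device with driver-private data via alloc_netdev_mq().
 * struct mq_priv_example and the queue count of 4 are hypothetical.
 */
struct mq_priv_example {
	spinlock_t lock;
};

static struct net_device *alloc_mq_example(void)
{
	/* Private area lands after the subqueue array, 32-byte aligned. */
	return alloc_netdev_mq(sizeof(struct mq_priv_example),
			       "mq%d", ether_setup, 4);
}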
4155/**
4156 * free_netdev - free network device
4157 * @dev: device
4158 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004159 * This function does the last stage of destroying an allocated device
4160 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004161 * If this is the last reference then it will be freed.
4162 */
4163void free_netdev(struct net_device *dev)
4164{
Denis V. Lunevf3005d72008-04-16 02:02:18 -07004165 release_net(dev_net(dev));
4166
Stephen Hemminger3041a062006-05-26 13:25:24 -07004167 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168 if (dev->reg_state == NETREG_UNINITIALIZED) {
4169 kfree((char *)dev - dev->padded);
4170 return;
4171 }
4172
4173 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4174 dev->reg_state = NETREG_RELEASED;
4175
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07004176 /* will free via device release */
4177 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004178}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004179
Linus Torvalds1da177e2005-04-16 15:20:36 -07004180/* Synchronize with packet receive processing. */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004181void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182{
4183 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07004184 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185}
4186
4187/**
4188 * unregister_netdevice - remove device from the kernel
4189 * @dev: device
4190 *
4191 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004192 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193 *
4194 * Callers must hold the rtnl semaphore. You may want
4195 * unregister_netdev() instead of this.
4196 */
4197
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08004198void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004199{
Herbert Xua6620712007-12-12 19:21:56 -08004200 ASSERT_RTNL();
4201
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004202 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004203 /* Finish processing unregister after unlock */
4204 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205}
4206
4207/**
4208 * unregister_netdev - remove device from the kernel
4209 * @dev: device
4210 *
4211 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004212 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213 *
4214 * This is just a wrapper for unregister_netdevice that takes
4215 * the rtnl semaphore. In general you want to use this and not
4216 * unregister_netdevice.
4217 */
4218void unregister_netdev(struct net_device *dev)
4219{
4220 rtnl_lock();
4221 unregister_netdevice(dev);
4222 rtnl_unlock();
4223}
4224
4225EXPORT_SYMBOL(unregister_netdev);
4226
Eric W. Biedermance286d32007-09-12 13:53:49 +02004227/**
4228 *	dev_change_net_namespace - move device to a different network namespace
4229 * @dev: device
4230 * @net: network namespace
4231 * @pat: If not NULL name pattern to try if the current device name
4232 * is already taken in the destination network namespace.
4233 *
4234 * This function shuts down a device interface and moves it
4235 * to a new network namespace. On success 0 is returned, on
4236 * a failure a netagive errno code is returned.
4237 *	a failure a negative errno code is returned.
4238 * Callers must hold the rtnl semaphore.
4239 */
4240
4241int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4242{
4243 char buf[IFNAMSIZ];
4244 const char *destname;
4245 int err;
4246
4247 ASSERT_RTNL();
4248
4249 /* Don't allow namespace local devices to be moved. */
4250 err = -EINVAL;
4251 if (dev->features & NETIF_F_NETNS_LOCAL)
4252 goto out;
4253
4254	/* Ensure the device has been registered */
4255 err = -EINVAL;
4256 if (dev->reg_state != NETREG_REGISTERED)
4257 goto out;
4258
4259	/* Get out if there is nothing to do */
4260 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09004261 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02004262 goto out;
4263
4264 /* Pick the destination device name, and ensure
4265 * we can use it in the destination network namespace.
4266 */
4267 err = -EEXIST;
4268 destname = dev->name;
4269 if (__dev_get_by_name(net, destname)) {
4270 /* We get here if we can't use the current device name */
4271 if (!pat)
4272 goto out;
4273 if (!dev_valid_name(pat))
4274 goto out;
4275 if (strchr(pat, '%')) {
4276 if (__dev_alloc_name(net, pat, buf) < 0)
4277 goto out;
4278 destname = buf;
4279 } else
4280 destname = pat;
4281 if (__dev_get_by_name(net, destname))
4282 goto out;
4283 }
4284
4285 /*
4286	 * And now a mini version of register_netdevice and unregister_netdevice.
4287 */
4288
4289	/* If device is running, close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07004290 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004291
4292 /* And unlink it from device chain */
4293 err = -ENODEV;
4294 unlist_netdevice(dev);
4295
4296 synchronize_net();
4297
4298 /* Shutdown queueing discipline. */
4299 dev_shutdown(dev);
4300
4301	/* Notify protocols that we are about to destroy
4302	   this device. They should clean up all their state.
4303 */
4304 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4305
4306 /*
4307 * Flush the unicast and multicast chains
4308 */
4309 dev_addr_discard(dev);
4310
4311 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004312 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004313
4314 /* Assign the new device name */
4315 if (destname != dev->name)
4316 strcpy(dev->name, destname);
4317
4318 /* If there is an ifindex conflict assign a new one */
4319 if (__dev_get_by_index(net, dev->ifindex)) {
4320 int iflink = (dev->iflink == dev->ifindex);
4321 dev->ifindex = dev_new_index(net);
4322 if (iflink)
4323 dev->iflink = dev->ifindex;
4324 }
4325
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004326 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004327 netdev_unregister_kobject(dev);
4328 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004329 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004330
4331 /* Add the device back in the hashes */
4332 list_netdevice(dev);
4333
4334 /* Notify protocols, that a new device appeared. */
4335	/* Notify protocols that a new device appeared. */
4336
4337 synchronize_net();
4338 err = 0;
4339out:
4340 return err;
4341}
4342
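/*
 * Editor's illustration, not part of the original file: moving a
 * device into another namespace with dev_change_net_namespace(),
 * supplying a "dev%d" pattern in case its name is already taken
 * there. move_dev_example() is a hypothetical helper; RTNL is held
 * and the target namespace was already looked up by the caller.
 */
static int move_dev_example(struct net_device *dev, struct net *target)
{
	ASSERT_RTNL();
	return dev_change_net_namespace(dev, target, "dev%d");
}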
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343static int dev_cpu_callback(struct notifier_block *nfb,
4344 unsigned long action,
4345 void *ocpu)
4346{
4347 struct sk_buff **list_skb;
David S. Milleree609cb2008-07-08 22:58:37 -07004348 struct netdev_queue **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004349 struct sk_buff *skb;
4350 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4351 struct softnet_data *sd, *oldsd;
4352
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07004353 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354 return NOTIFY_OK;
4355
4356 local_irq_disable();
4357 cpu = smp_processor_id();
4358 sd = &per_cpu(softnet_data, cpu);
4359 oldsd = &per_cpu(softnet_data, oldcpu);
4360
4361 /* Find end of our completion_queue. */
4362 list_skb = &sd->completion_queue;
4363 while (*list_skb)
4364 list_skb = &(*list_skb)->next;
4365 /* Append completion queue from offline CPU. */
4366 *list_skb = oldsd->completion_queue;
4367 oldsd->completion_queue = NULL;
4368
4369 /* Find end of our output_queue. */
4370 list_net = &sd->output_queue;
4371 while (*list_net)
4372 list_net = &(*list_net)->next_sched;
4373 /* Append output queue from offline CPU. */
4374 *list_net = oldsd->output_queue;
4375 oldsd->output_queue = NULL;
4376
4377 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4378 local_irq_enable();
4379
4380 /* Process offline CPU's input_pkt_queue */
4381 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4382 netif_rx(skb);
4383
4384 return NOTIFY_OK;
4385}

#ifdef CONFIG_NET_DMA
/**
 * net_dma_rebalance - try to maintain one DMA channel per CPU
 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
 *
 * This is called when the number of channels allocated to the net_dma client
 * changes. The net_dma client tries to have one DMA channel per CPU.
 */
static void net_dma_rebalance(struct net_dma *net_dma)
{
	unsigned int cpu, i, n, chan_idx;
	struct dma_chan *chan;

	if (cpus_empty(net_dma->channel_mask)) {
		for_each_online_cpu(cpu)
			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
		return;
	}

	i = 0;
	cpu = first_cpu(cpu_online_map);

	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
		chan = net_dma->channels[chan_idx];

		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
		   + (i < (num_online_cpus() %
			cpus_weight(net_dma->channel_mask)) ? 1 : 0));

		while (n) {
			per_cpu(softnet_data, cpu).net_dma = chan;
			cpu = next_cpu(cpu, cpu_online_map);
			n--;
		}
		i++;
	}
}
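
/*
 * Worked example of the split above: with 8 online CPUs and 3
 * channels, num_online_cpus() / cpus_weight() is 2 with remainder 2,
 * so the first two channels each serve three CPUs and the third
 * serves the remaining two.
 */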

/**
 * netdev_dma_event - event callback for the net_dma_client
 * @client: should always be net_dma_client
 * @chan: DMA channel for the event
 * @state: DMA state to be handled
 */
static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state)
{
	int i, found = 0, pos = -1;
	struct net_dma *net_dma =
		container_of(client, struct net_dma, client);
	enum dma_state_client ack = DMA_DUP; /* default: take no action */

	spin_lock(&net_dma->lock);
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		for (i = 0; i < nr_cpu_ids; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				break;
			} else if (net_dma->channels[i] == NULL && pos < 0)
				pos = i;

		if (!found && pos >= 0) {
			ack = DMA_ACK;
			net_dma->channels[pos] = chan;
			cpu_set(pos, net_dma->channel_mask);
			net_dma_rebalance(net_dma);
		}
		break;
	case DMA_RESOURCE_REMOVED:
		for (i = 0; i < nr_cpu_ids; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				pos = i;
				break;
			}

		if (found) {
			ack = DMA_ACK;
			cpu_clear(pos, net_dma->channel_mask);
			net_dma->channels[pos] = NULL;
			net_dma_rebalance(net_dma);
		}
		break;
	default:
		break;
	}
	spin_unlock(&net_dma->lock);

	return ack;
}

/**
 * netdev_dma_register - register the networking subsystem as a DMA client
 */
static int __init netdev_dma_register(void)
{
	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *),
		GFP_KERNEL);
	if (unlikely(!net_dma.channels)) {
		printk(KERN_NOTICE
			"netdev_dma: no memory for net_dma.channels\n");
		return -ENOMEM;
	}
	spin_lock_init(&net_dma.lock);
	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
	dma_async_client_register(&net_dma.client);
	dma_async_client_chan_request(&net_dma.client);
	return 0;
}

#else
static int __init netdev_dma_register(void) { return -ENODEV; }
#endif /* CONFIG_NET_DMA */
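
/*
 * netdev_dma_register() is called once from net_dev_init() below;
 * when CONFIG_NET_DMA is disabled the stub above simply reports
 * -ENODEV and networking proceeds without DMA offload.
 */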

/**
 * netdev_compute_features - compute conjunction of two feature sets
 * @all: feature set of the master device so far
 * @one: feature set of the device being added
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Returns
 * the new feature set.
 */
int netdev_compute_features(unsigned long all, unsigned long one)
{
	/* if device needs checksumming, downgrade to hw checksumming */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;

	/* if device can't do all checksum, downgrade to ipv4/ipv6 */
	if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
		all ^= NETIF_F_HW_CSUM
		       | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (one & NETIF_F_GSO)
		one |= NETIF_F_GSO_SOFTWARE;
	one |= NETIF_F_GSO;

	/* If even one device supports robust GSO, enable it for all. */
	if (one & NETIF_F_GSO_ROBUST)
		all |= NETIF_F_GSO_ROBUST;

	all &= one | NETIF_F_LLTX;

	if (!(all & NETIF_F_ALL_CSUM))
		all &= ~NETIF_F_SG;
	if (!(all & NETIF_F_SG))
		all &= ~NETIF_F_GSO_MASK;

	return all;
}
EXPORT_SYMBOL(netdev_compute_features);
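
/*
 * Worked example (feature values illustrative): a master starting
 * with all = NETIF_F_NO_CSUM | NETIF_F_SG that adds a slave with
 * one = NETIF_F_IP_CSUM | NETIF_F_SG is first downgraded from
 * NETIF_F_NO_CSUM to NETIF_F_HW_CSUM, then to the protocol
 * checksums, ending with all = NETIF_F_SG | NETIF_F_IP_CSUM;
 * scatter/gather survives because the result still contains a
 * NETIF_F_ALL_CSUM bit.
 */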

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
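
/*
 * These tables back the per-namespace device lookups: one instance
 * hashes names for __dev_get_by_name(), the other hashes ifindexes
 * for __dev_get_by_index(), each chaining devices off one of the
 * NETDEV_HASHENTRIES buckets.
 */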

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
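
/*
 * net_dev_init() below hands netdev_net_ops to
 * register_pernet_subsys(), so netdev_init() runs for every network
 * namespace as it is created and netdev_exit() as it is torn down.
 */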

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *next;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, next) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
				__func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};
4619
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620/*
4621 * Initialize the DEV module. At boot time this walks the device list and
4622 * unhooks any devices that fail to initialise (normally hardware not
4623 * present) and leaves us with a valid list of present and active devices.
4624 *
4625 */
4626
4627/*
4628 * This is called single threaded during boot, so no need
4629 * to take the rtnl semaphore.
4630 */
4631static int __init net_dev_init(void)
4632{
4633 int i, rc = -ENOMEM;
4634
4635 BUG_ON(!dev_boot_phase);
4636
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637 if (dev_proc_init())
4638 goto out;
4639
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004640 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641 goto out;
4642
4643 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08004644 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004645 INIT_LIST_HEAD(&ptype_base[i]);
4646
Eric W. Biederman881d9662007-09-17 11:56:21 -07004647 if (register_pernet_subsys(&netdev_net_ops))
4648 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649
Eric W. Biedermance286d32007-09-12 13:53:49 +02004650 if (register_pernet_device(&default_device_ops))
4651 goto out;
4652
Linus Torvalds1da177e2005-04-16 15:20:36 -07004653 /*
4654 * Initialise the packet receive queues.
4655 */
4656
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07004657 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658 struct softnet_data *queue;
4659
4660 queue = &per_cpu(softnet_data, i);
4661 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004662 queue->completion_queue = NULL;
4663 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004664
4665 queue->backlog.poll = process_backlog;
4666 queue->backlog.weight = weight_p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667 }
4668
Chris Leechdb217332006-06-17 21:24:58 -07004669 netdev_dma_register();
4670
Linus Torvalds1da177e2005-04-16 15:20:36 -07004671 dev_boot_phase = 0;
4672
4673 open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
4674 open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
4675
4676 hotcpu_notifier(dev_cpu_callback, 0);
4677 dst_init();
4678 dev_mcast_init();
4679 rc = 0;
4680out:
4681 return rc;
4682}
4683
4684subsys_initcall(net_dev_init);
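
/*
 * subsys_initcall() places net_dev_init() early in the boot sequence,
 * ahead of the device_initcall() level at which network drivers
 * typically register, so the packet-type lists and per-CPU queues set
 * up above exist before the first register_netdev() call.
 */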

EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif

EXPORT_PER_CPU_SYMBOL(softnet_data);