/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *	D.J. Barrow         : Fixed bug where dev->refcnt gets set
 *	                      to 2 if register_netdev gets called
 *	                      before net_dev_init & also removed a
 *	                      few lines of code in the process.
 *	Alan Cox            : device private ioctl copies fields back.
 *	Alan Cox            : Transmit queue code does relevant
 *	                      stunts to keep the queue safe.
 *	Alan Cox            : Fixed double lock.
 *	Alan Cox            : Fixed promisc NULL pointer trap
 *	????????            : Support the full private ioctl range
 *	Alan Cox            : Moved ioctl permission check into
 *	                      drivers
 *	Tim Kordas          : SIOCADDMULTI/SIOCDELMULTI
 *	Alan Cox            : 100 backlog just doesn't cut it when
 *	                      you start doing multicast video 8)
 *	Alan Cox            : Rewrote net_bh and list manager.
 *	Alan Cox            : Fix ETH_P_ALL echoback lengths.
 *	Alan Cox            : Took out transmit every packet pass
 *	                      Saved a few bytes in the ioctl handler
 *	Alan Cox            : Network driver sets packet type before
 *	                      calling netif_rx. Saves a function
 *	                      call a packet.
 *	Alan Cox            : Hashed net_bh()
 *	Richard Kooijman    : Timestamp fixes.
 *	Alan Cox            : Wrong field in SIOCGIFDSTADDR
 *	Alan Cox            : Device lock protection.
 *	Alan Cox            : Fixed nasty side effect of device close
 *	                      changes.
 *	Rudi Cilibrasi      : Pass the right thing to
 *	                      set_mac_address()
 *	Dave Miller         : 32bit quantity for the device lock to
 *	                      make it work out on a Sparc.
 *	Bjorn Ekwall        : Added KERNELD hack.
 *	Alan Cox            : Cleaned up the backlog initialise.
 *	Craig Metz          : SIOCGIFCONF fix if space for under
 *	                      1 device.
 *	Thomas Bogendoerfer : Return ENODEV for dev_open, if there
 *	                      is no device open function.
 *	Andi Kleen          : Fix error reporting for SIOCGIFCONF
 *	Michael Chastain    : Fix signed/unsigned for SIOCGIFCONF
 *	Cyrus Durgin        : Cleaned for KMOD
 *	Adam Sulmicki       : Bug Fix : Network Device Unload
 *	                      A network device unload needs to purge
 *	                      the backlog queue.
 *	Paul Rusty Russell  : SIOCSIFNAME
 *	Pekka Riikonen      : Netdev boot-time settings code
 *	Andrew Morton       : Make unregister_netdevice wait
 *	                      indefinitely on dev->refcnt
 *	J Hadi Salim        : - Backlog queue sampling
 *	                      - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>

#include "net-sysfs.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

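/*
 * Illustrative sketch (not in the original file): bucket selection for a
 * registered handler looks only at the low nibble of the protocol value,
 * so e.g. a handler for ETH_P_IP (0x0800) lands in bucket 0:
 *
 *	struct list_head *head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 */
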
#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client client;
	spinlock_t lock;
	cpumask_t channel_mask;
	struct dma_chan **channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

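/*
 * Illustrative sketch (not in the original file): a pure reader walking
 * the device list under dev_base_lock, per the rules above; the caller
 * is assumed to be interested in the &init_net namespace:
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(&init_net, dev)
 *		printk(KERN_DEBUG "found %s\n", dev->name);
 *	read_unlock(&dev_base_lock);
 */
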
#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet
 *	is cloned and should be copied-on-write; it would
 *	change it and subsequent readers would get a broken packet.
 *						--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

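/*
 * Illustrative sketch (not in the original file): registering and later
 * removing a handler for IPv4 traffic.  my_rcv() and my_pt are
 * hypothetical names; my_rcv() uses the standard &packet_type ->func
 * signature of this kernel:
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pt = {
 *		.type = __constant_htons(ETH_P_IP),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);
 *	...
 *	dev_remove_pack(&my_pt);	(sleeps; see dev_remove_pack below)
 */
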
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

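/*
 * Illustrative sketch (not in the original file): the "netdev=" boot
 * parameter parsed above takes up to four integers followed by a name,
 * e.g. on the kernel command line (values here are made up):
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * which records irq 9 and base_addr 0x300 for eth0, to be picked up by
 * netdev_boot_setup_check() during probing.
 */
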
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

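/*
 * Illustrative sketch (not in the original file): the usual
 * lookup/dev_put() pairing, assuming the caller works in &init_net:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */
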
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns %NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns %NULL if a
 *	device is not found or a pointer to the device. The device returned
 *	has had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}


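/*
 * Illustrative sketch (not in the original file): a driver that wants
 * automatic numbering typically passes a "%d" format string:
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;	(dev->name now holds e.g. "eth2")
 */
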
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d"
 *	may be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

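/*
 * Illustrative sketch (not in the original file): dev_open() and
 * dev_close() both assert the RTNL, so an out-of-band caller brackets
 * them like this:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */
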
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch the poll list,
	 * as it can even be on a different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

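/*
 * Illustrative sketch (not in the original file): a minimal notifier,
 * with my_netdev_event() as a hypothetical callback.  In this kernel the
 * notifier's ptr argument is the struct net_device itself:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */
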
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by the sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


void __netif_schedule(struct Qdisc *q)
{
	BUG_ON(q == &noop_qdisc);

	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		q->next_sched = sd->output_queue;
		sd->output_queue = q;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

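/*
 * Illustrative sketch (not in the original file): the classic
 * suspend/resume pairing in a hypothetical PCI network driver:
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);	(stop the queue, mark absent)
 *		...
 *		return 0;
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		...
 *		netif_device_attach(dev);	(mark present, wake the queue)
 *		return 0;
 *	}
 */
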
Ben Hutchings6de329e2008-06-16 17:02:28 -07001401static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1402{
1403 return ((features & NETIF_F_GEN_CSUM) ||
1404 ((features & NETIF_F_IP_CSUM) &&
1405 protocol == htons(ETH_P_IP)) ||
1406 ((features & NETIF_F_IPV6_CSUM) &&
1407 protocol == htons(ETH_P_IPV6)));
1408}
1409
1410static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1411{
1412 if (can_checksum_protocol(dev->features, skb->protocol))
1413 return true;
1414
1415 if (skb->protocol == htons(ETH_P_8021Q)) {
1416 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1417 if (can_checksum_protocol(dev->features & dev->vlan_features,
1418 veh->h_vlan_encapsulated_proto))
1419 return true;
1420 }
1421
1422 return false;
1423}
Denis Vlasenko56079432006-03-29 15:57:29 -08001424
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425/*
 1426 * Invalidate the hardware checksum when a packet is to be mangled, and
 1427 * complete the checksum manually on the outgoing path.
1428 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001429int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430{
Al Virod3bc23e2006-11-14 21:24:49 -08001431 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001432 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433
Patrick McHardy84fa7932006-08-29 16:44:56 -07001434 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001435 goto out_set_summed;
1436
1437 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001438 /* Let GSO fix up the checksum. */
1439 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 }
1441
Herbert Xua0308472007-10-15 01:47:15 -07001442 offset = skb->csum_start - skb_headroom(skb);
1443 BUG_ON(offset >= skb_headlen(skb));
1444 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1445
1446 offset += skb->csum_offset;
1447 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1448
1449 if (skb_cloned(skb) &&
1450 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1452 if (ret)
1453 goto out;
1454 }
1455
Herbert Xua0308472007-10-15 01:47:15 -07001456 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001457out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001459out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 return ret;
1461}
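/*
 * A worked example of the offset arithmetic above (illustrative): for a
 * CHECKSUM_PARTIAL TCP/IPv4 packet, skb->csum_start points at the
 * transport header, so "offset" becomes the transport header offset
 * inside the linear data; adding skb->csum_offset (16 for TCP, the
 * position of tcphdr->check) then locates the checksum field that
 * csum_fold() fills in.
 */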
1462
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001463/**
1464 * skb_gso_segment - Perform segmentation on skb.
1465 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001466 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001467 *
1468 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001469 *
1470 * It may return NULL if the skb requires no segmentation. This is
1471 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001472 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001473struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001474{
1475 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1476 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001477 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001478 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001479
1480 BUG_ON(skb_shinfo(skb)->frag_list);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001481
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001482 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001483 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001484 __skb_pull(skb, skb->mac_len);
1485
Herbert Xuf9d106a2007-04-23 22:36:13 -07001486 if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001487 if (skb_header_cloned(skb) &&
1488 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1489 return ERR_PTR(err);
1490 }
1491
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001492 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001493 list_for_each_entry_rcu(ptype,
1494 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001495 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001496 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001497 err = ptype->gso_send_check(skb);
1498 segs = ERR_PTR(err);
1499 if (err || skb_gso_ok(skb, features))
1500 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001501 __skb_push(skb, (skb->data -
1502 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001503 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001504 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001505 break;
1506 }
1507 }
1508 rcu_read_unlock();
1509
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001510 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001511
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001512 return segs;
1513}
1514
1515EXPORT_SYMBOL(skb_gso_segment);
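/*
 * Usage sketch (illustrative): callers walk the returned singly linked
 * list and transmit each segment, e.g.
 *
 *	segs = skb_gso_segment(skb, dev->features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	while (segs) {
 *		struct sk_buff *nskb = segs->next;
 *
 *		segs->next = NULL;
 *		xmit_one(segs);		(xmit_one is a hypothetical helper)
 *		segs = nskb;
 *	}
 */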
1516
Herbert Xufb286bb2005-11-10 13:01:24 -08001517/* Take action when hardware reception checksum errors are detected. */
1518#ifdef CONFIG_BUG
1519void netdev_rx_csum_fault(struct net_device *dev)
1520{
1521 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001522 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001523 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001524 dump_stack();
1525 }
1526}
1527EXPORT_SYMBOL(netdev_rx_csum_fault);
1528#endif
1529
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530/* Actually, we should eliminate this check as soon as we know that:
 1531 * 1. an IOMMU is present and can map all of the memory, or
 1532 * 2. no high memory really exists on this machine.
 1533 */
1534
1535static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1536{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001537#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 int i;
1539
1540 if (dev->features & NETIF_F_HIGHDMA)
1541 return 0;
1542
1543 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1544 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1545 return 1;
1546
Herbert Xu3d3a8532006-06-27 13:33:10 -07001547#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 return 0;
1549}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001551struct dev_gso_cb {
1552 void (*destructor)(struct sk_buff *skb);
1553};
1554
1555#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1556
1557static void dev_gso_skb_destructor(struct sk_buff *skb)
1558{
1559 struct dev_gso_cb *cb;
1560
1561 do {
1562 struct sk_buff *nskb = skb->next;
1563
1564 skb->next = nskb->next;
1565 nskb->next = NULL;
1566 kfree_skb(nskb);
1567 } while (skb->next);
1568
1569 cb = DEV_GSO_CB(skb);
1570 if (cb->destructor)
1571 cb->destructor(skb);
1572}
1573
1574/**
1575 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1576 * @skb: buffer to segment
1577 *
1578 * This function segments the given skb and stores the list of segments
1579 * in skb->next.
1580 */
1581static int dev_gso_segment(struct sk_buff *skb)
1582{
1583 struct net_device *dev = skb->dev;
1584 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001585 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1586 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001587
Herbert Xu576a30e2006-06-27 13:22:38 -07001588 segs = skb_gso_segment(skb, features);
1589
1590 /* Verifying header integrity only. */
1591 if (!segs)
1592 return 0;
1593
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001594 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001595 return PTR_ERR(segs);
1596
1597 skb->next = segs;
1598 DEV_GSO_CB(skb)->destructor = skb->destructor;
1599 skb->destructor = dev_gso_skb_destructor;
1600
1601 return 0;
1602}
1603
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001604int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1605 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001606{
1607 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001608 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001609 dev_queue_xmit_nit(skb, dev);
1610
Herbert Xu576a30e2006-06-27 13:22:38 -07001611 if (netif_needs_gso(dev, skb)) {
1612 if (unlikely(dev_gso_segment(skb)))
1613 goto out_kfree_skb;
1614 if (skb->next)
1615 goto gso;
1616 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001617
Herbert Xu576a30e2006-06-27 13:22:38 -07001618 return dev->hard_start_xmit(skb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001619 }
1620
Herbert Xu576a30e2006-06-27 13:22:38 -07001621gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001622 do {
1623 struct sk_buff *nskb = skb->next;
1624 int rc;
1625
1626 skb->next = nskb->next;
1627 nskb->next = NULL;
1628 rc = dev->hard_start_xmit(nskb, dev);
1629 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001630 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001631 skb->next = nskb;
1632 return rc;
1633 }
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001634 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001635 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001636 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001637
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001638 skb->destructor = DEV_GSO_CB(skb)->destructor;
1639
1640out_kfree_skb:
1641 kfree_skb(skb);
1642 return 0;
1643}
1644
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645/**
1646 * dev_queue_xmit - transmit a buffer
1647 * @skb: buffer to transmit
1648 *
1649 * Queue a buffer for transmission to a network device. The caller must
1650 * have set the device and priority and built the buffer before calling
1651 * this function. The function can be called from an interrupt.
1652 *
1653 * A negative errno code is returned on a failure. A success does not
1654 * guarantee the frame will be transmitted as it may be dropped due
1655 * to congestion or traffic shaping.
Ben Greearaf191362005-04-24 20:12:36 -07001656 *
1657 * -----------------------------------------------------------------------------------
1658 * I notice this method can also return errors from the queue disciplines,
1659 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1660 * be positive.
1661 *
1662 * Regardless of the return value, the skb is consumed, so it is currently
1663 * difficult to retry a send to this method. (You can bump the ref count
1664 * before sending to hold a reference for retry if you are careful.)
1665 *
1666 * When calling this method, interrupts MUST be enabled. This is because
1667 * the BH enable code must have IRQs enabled so that it will not deadlock.
1668 * --BLG
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 */
1670
David S. Miller8f0f2222008-07-15 03:47:03 -07001671static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
1672{
1673 u32 *addr, *ports, hash, ihl;
1674 u8 ip_proto;
1675 int alen;
1676
1677 switch (skb->protocol) {
1678 case __constant_htons(ETH_P_IP):
1679 ip_proto = ip_hdr(skb)->protocol;
1680 addr = &ip_hdr(skb)->saddr;
1681 ihl = ip_hdr(skb)->ihl;
1682 alen = 2;
1683 break;
1684 case __constant_htons(ETH_P_IPV6):
1685 ip_proto = ipv6_hdr(skb)->nexthdr;
1686 addr = &ipv6_hdr(skb)->saddr.s6_addr32[0];
1687 ihl = (40 >> 2);
1688 alen = 8;
1689 break;
1690 default:
1691 return 0;
1692 }
1693
1694 ports = (u32 *) (skb_network_header(skb) + (ihl * 4));
1695
1696 hash = 0;
1697 while (alen--)
1698 hash ^= *addr++;
1699
1700 switch (ip_proto) {
1701 case IPPROTO_TCP:
1702 case IPPROTO_UDP:
1703 case IPPROTO_DCCP:
1704 case IPPROTO_ESP:
1705 case IPPROTO_AH:
1706 case IPPROTO_SCTP:
1707 case IPPROTO_UDPLITE:
1708 hash ^= *ports;
1709 break;
1710
1711 default:
1712 break;
1713 }
1714
1715 return hash % dev->real_num_tx_queues;
1716}
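/*
 * Worked example (illustrative): for a TCP/IPv4 flow the loop above
 * XORs saddr and daddr (alen == 2 words), then XORs in the single
 * 32-bit word holding both the source and destination ports; the
 * final "hash % dev->real_num_tx_queues" then maps every packet of
 * the flow to the same TX queue.
 */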
1717
David S. Millere8a04642008-07-17 00:34:19 -07001718static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1719 struct sk_buff *skb)
1720{
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001721 u16 queue_index = 0;
1722
David S. Millereae792b2008-07-15 03:03:33 -07001723 if (dev->select_queue)
1724 queue_index = dev->select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001725 else if (dev->real_num_tx_queues > 1)
1726 queue_index = simple_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001727
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001728 skb_set_queue_mapping(skb, queue_index);
1729 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001730}
1731
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732int dev_queue_xmit(struct sk_buff *skb)
1733{
1734 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001735 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 struct Qdisc *q;
1737 int rc = -ENOMEM;
1738
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001739 /* GSO will handle the following emulations directly. */
1740 if (netif_needs_gso(dev, skb))
1741 goto gso;
1742
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 if (skb_shinfo(skb)->frag_list &&
1744 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001745 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 goto out_kfree_skb;
1747
 1748 /* A fragmented skb is linearized if the device does not support SG,
 1749 * or if at least one of the fragments is in highmem and the device
 1750 * does not support DMA from it.
1751 */
1752 if (skb_shinfo(skb)->nr_frags &&
1753 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001754 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 goto out_kfree_skb;
1756
1757 /* If packet is not checksummed and device does not support
1758 * checksumming for this protocol, complete checksumming here.
1759 */
Herbert Xu663ead32007-04-09 11:59:07 -07001760 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1761 skb_set_transport_header(skb, skb->csum_start -
1762 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001763 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1764 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001765 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001767gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001768 /* Disable soft irqs for various locks below. Also
1769 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001771 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772
David S. Millereae792b2008-07-15 03:03:33 -07001773 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001774 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001775
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776#ifdef CONFIG_NET_CLS_ACT
 1777 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1778#endif
1779 if (q->enqueue) {
David S. Miller37437bb2008-07-16 02:15:04 -07001780 spinlock_t *root_lock = qdisc_root_lock(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781
David S. Miller37437bb2008-07-16 02:15:04 -07001782 spin_lock(root_lock);
1783
1784 rc = q->enqueue(skb, q);
1785 qdisc_run(q);
1786
1787 spin_unlock(root_lock);
1788
1789 rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
1790 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 }
1792
1793 /* The device has no queue. Common case for software devices:
1794 loopback, all the sorts of tunnels...
1795
Herbert Xu932ff272006-06-09 12:20:56 -07001796 Really, it is unlikely that netif_tx_lock protection is necessary
 1797 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 counters.)
 1799 However, it is possible that they rely on the protection
 1800 provided by us here.
 1801
 1802 Check this and take the lock: it is not prone to deadlocks.
 1803 Either that, or take the noqueue-qdisc path; it is even simpler 8)
1804 */
1805 if (dev->flags & IFF_UP) {
1806 int cpu = smp_processor_id(); /* ok because BHs are off */
1807
David S. Millerc773e842008-07-08 23:13:53 -07001808 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
David S. Millerc773e842008-07-08 23:13:53 -07001810 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001812 if (!netif_tx_queue_stopped(txq)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 rc = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001814 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001815 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 goto out;
1817 }
1818 }
David S. Millerc773e842008-07-08 23:13:53 -07001819 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 if (net_ratelimit())
1821 printk(KERN_CRIT "Virtual device %s asks to "
1822 "queue packet!\n", dev->name);
1823 } else {
 1824 /* Recursion detected! It is possible,
 1825 * unfortunately. */
1826 if (net_ratelimit())
1827 printk(KERN_CRIT "Dead loop on virtual device "
1828 "%s, fix it urgently!\n", dev->name);
1829 }
1830 }
1831
1832 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001833 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834
1835out_kfree_skb:
1836 kfree_skb(skb);
1837 return rc;
1838out:
Herbert Xud4828d82006-06-22 02:28:18 -07001839 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 return rc;
1841}
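/*
 * Usage sketch (illustrative): a caller builds the buffer, sets the
 * device and priority, and hands the skb off; it must not touch the
 * skb afterwards, since it is consumed on both success and failure:
 *
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	rc = dev_queue_xmit(skb);	(do not kfree_skb() after this)
 */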
1842
1843
1844/*=======================================================================
1845 Receiver routines
1846 =======================================================================*/
1847
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001848int netdev_max_backlog __read_mostly = 1000;
1849int netdev_budget __read_mostly = 300;
1850int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851
1852DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1853
1854
Linus Torvalds1da177e2005-04-16 15:20:36 -07001855/**
1856 * netif_rx - post buffer to the network code
1857 * @skb: buffer to post
1858 *
1859 * This function receives a packet from a device driver and queues it for
1860 * the upper (protocol) levels to process. It always succeeds. The buffer
1861 * may be dropped during processing for congestion control or by the
1862 * protocol layers.
1863 *
1864 * return values:
1865 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866 * NET_RX_DROP (packet was dropped)
1867 *
1868 */
1869
1870int netif_rx(struct sk_buff *skb)
1871{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 struct softnet_data *queue;
1873 unsigned long flags;
1874
1875 /* if netpoll wants it, pretend we never saw it */
1876 if (netpoll_rx(skb))
1877 return NET_RX_DROP;
1878
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001879 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001880 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881
1882 /*
 1883 * The code is rearranged so that the path is shortest when the
 1884 * CPU is congested but still operating.
1885 */
1886 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 queue = &__get_cpu_var(softnet_data);
1888
1889 __get_cpu_var(netdev_rx_stat).total++;
1890 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1891 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892enqueue:
1893 dev_hold(skb->dev);
1894 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001896 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 }
1898
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001899 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 goto enqueue;
1901 }
1902
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 __get_cpu_var(netdev_rx_stat).dropped++;
1904 local_irq_restore(flags);
1905
1906 kfree_skb(skb);
1907 return NET_RX_DROP;
1908}
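/*
 * Usage sketch (illustrative): a non-NAPI driver's receive interrupt
 * handler typically does
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * and lets the per-CPU backlog queue above feed the protocol layers.
 */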
1909
1910int netif_rx_ni(struct sk_buff *skb)
1911{
1912 int err;
1913
1914 preempt_disable();
1915 err = netif_rx(skb);
1916 if (local_softirq_pending())
1917 do_softirq();
1918 preempt_enable();
1919
1920 return err;
1921}
1922
1923EXPORT_SYMBOL(netif_rx_ni);
1924
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001925static inline struct net_device *skb_bond(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926{
1927 struct net_device *dev = skb->dev;
1928
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001929 if (dev->master) {
David S. Miller7ea49ed2006-08-14 17:08:36 -07001930 if (skb_bond_should_drop(skb)) {
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001931 kfree_skb(skb);
1932 return NULL;
1933 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 skb->dev = dev->master;
Jay Vosburgh8f903c72006-02-21 16:36:44 -08001935 }
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001936
1937 return dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938}
1939
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001940
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941static void net_tx_action(struct softirq_action *h)
1942{
1943 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1944
1945 if (sd->completion_queue) {
1946 struct sk_buff *clist;
1947
1948 local_irq_disable();
1949 clist = sd->completion_queue;
1950 sd->completion_queue = NULL;
1951 local_irq_enable();
1952
1953 while (clist) {
1954 struct sk_buff *skb = clist;
1955 clist = clist->next;
1956
1957 BUG_TRAP(!atomic_read(&skb->users));
1958 __kfree_skb(skb);
1959 }
1960 }
1961
1962 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07001963 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964
1965 local_irq_disable();
1966 head = sd->output_queue;
1967 sd->output_queue = NULL;
1968 local_irq_enable();
1969
1970 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07001971 struct Qdisc *q = head;
1972 spinlock_t *root_lock;
1973
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 head = head->next_sched;
1975
1976 smp_mb__before_clear_bit();
David S. Miller37437bb2008-07-16 02:15:04 -07001977 clear_bit(__QDISC_STATE_SCHED, &q->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978
David S. Miller37437bb2008-07-16 02:15:04 -07001979 root_lock = qdisc_root_lock(q);
1980 if (spin_trylock(root_lock)) {
1981 qdisc_run(q);
1982 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 } else {
David S. Miller37437bb2008-07-16 02:15:04 -07001984 __netif_schedule(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 }
1986 }
1987 }
1988}
1989
Stephen Hemminger6f05f622007-03-08 20:46:03 -08001990static inline int deliver_skb(struct sk_buff *skb,
1991 struct packet_type *pt_prev,
1992 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993{
1994 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001995 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996}
1997
1998#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Stephen Hemminger6229e362007-03-21 13:38:47 -07001999/* These hooks are defined here for ATM */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000struct net_bridge;
2001struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
2002 unsigned char *addr);
Stephen Hemminger6229e362007-03-21 13:38:47 -07002003void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
Stephen Hemminger6229e362007-03-21 13:38:47 -07002005/*
 2006 * If the bridge module is loaded, call the bridging hook.
 2007 * Returns NULL if the packet was consumed.
2008 */
2009struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2010 struct sk_buff *skb) __read_mostly;
2011static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2012 struct packet_type **pt_prev, int *ret,
2013 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014{
2015 struct net_bridge_port *port;
2016
Stephen Hemminger6229e362007-03-21 13:38:47 -07002017 if (skb->pkt_type == PACKET_LOOPBACK ||
2018 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2019 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020
2021 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002022 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002024 }
2025
Stephen Hemminger6229e362007-03-21 13:38:47 -07002026 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027}
2028#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002029#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030#endif
2031
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002032#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2033struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2034EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2035
2036static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2037 struct packet_type **pt_prev,
2038 int *ret,
2039 struct net_device *orig_dev)
2040{
2041 if (skb->dev->macvlan_port == NULL)
2042 return skb;
2043
2044 if (*pt_prev) {
2045 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2046 *pt_prev = NULL;
2047 }
2048 return macvlan_handle_frame_hook(skb);
2049}
2050#else
2051#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2052#endif
2053
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054#ifdef CONFIG_NET_CLS_ACT
 2055/* TODO: Maybe we should just force sch_ingress to be compiled in
 2056 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
 2057 * instructions (a compare and two extra stores) when it is not
 2058 * enabled but CONFIG_NET_CLS_ACT is.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002059 * NOTE: This doesn't break any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 * the ingress scheduler, you just can't add policies on ingress.
2061 *
2062 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002063static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002066 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002067 struct netdev_queue *rxq;
2068 int result = TC_ACT_OK;
2069 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002070
Herbert Xuf697c3e2007-10-14 00:38:47 -07002071 if (MAX_RED_LOOP < ttl++) {
2072 printk(KERN_WARNING
2073 "Redir loop detected Dropping packet (%d->%d)\n",
2074 skb->iif, dev->ifindex);
2075 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 }
2077
Herbert Xuf697c3e2007-10-14 00:38:47 -07002078 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2079 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2080
David S. Miller555353c2008-07-08 17:33:13 -07002081 rxq = &dev->rx_queue;
2082
2083 spin_lock(&rxq->lock);
David S. Miller816f3252008-07-08 22:49:00 -07002084 if ((q = rxq->qdisc) != NULL)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002085 result = q->enqueue(skb, q);
David S. Miller555353c2008-07-08 17:33:13 -07002086 spin_unlock(&rxq->lock);
Herbert Xuf697c3e2007-10-14 00:38:47 -07002087
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 return result;
2089}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002090
2091static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2092 struct packet_type **pt_prev,
2093 int *ret, struct net_device *orig_dev)
2094{
David S. Miller816f3252008-07-08 22:49:00 -07002095 if (!skb->dev->rx_queue.qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002096 goto out;
2097
2098 if (*pt_prev) {
2099 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2100 *pt_prev = NULL;
2101 } else {
2102 /* Huh? Why does turning on AF_PACKET affect this? */
2103 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2104 }
2105
2106 switch (ing_filter(skb)) {
2107 case TC_ACT_SHOT:
2108 case TC_ACT_STOLEN:
2109 kfree_skb(skb);
2110 return NULL;
2111 }
2112
2113out:
2114 skb->tc_verd = 0;
2115 return skb;
2116}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117#endif
2118
Patrick McHardybc1d0412008-07-14 22:49:30 -07002119/*
2120 * netif_nit_deliver - deliver received packets to network taps
2121 * @skb: buffer
2122 *
2123 * This function is used to deliver incoming packets to network
2124 * taps. It should be used when the normal netif_receive_skb path
2125 * is bypassed, for example because of VLAN acceleration.
2126 */
2127void netif_nit_deliver(struct sk_buff *skb)
2128{
2129 struct packet_type *ptype;
2130
2131 if (list_empty(&ptype_all))
2132 return;
2133
2134 skb_reset_network_header(skb);
2135 skb_reset_transport_header(skb);
2136 skb->mac_len = skb->network_header - skb->mac_header;
2137
2138 rcu_read_lock();
2139 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2140 if (!ptype->dev || ptype->dev == skb->dev)
2141 deliver_skb(skb, ptype, skb->dev);
2142 }
2143 rcu_read_unlock();
2144}
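/*
 * Illustrative note: the VLAN hardware-acceleration receive path is the
 * expected caller here, since it hands the skb to the VLAN device
 * without going through netif_receive_skb() and would otherwise hide
 * the traffic from packet taps such as tcpdump.
 */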
2145
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002146/**
2147 * netif_receive_skb - process receive buffer from network
2148 * @skb: buffer to process
2149 *
2150 * netif_receive_skb() is the main receive data processing function.
2151 * It always succeeds. The buffer may be dropped during processing
2152 * for congestion control or by the protocol layers.
2153 *
2154 * This function may only be called from softirq context and interrupts
2155 * should be enabled.
2156 *
2157 * Return values (usually ignored):
2158 * NET_RX_SUCCESS: no congestion
2159 * NET_RX_DROP: packet was dropped
2160 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161int netif_receive_skb(struct sk_buff *skb)
2162{
2163 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002164 struct net_device *orig_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002166 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167
2168 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002169 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 return NET_RX_DROP;
2171
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002172 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002173 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
Patrick McHardyc01003c2007-03-29 11:46:52 -07002175 if (!skb->iif)
2176 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002177
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002178 orig_dev = skb_bond(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002180 if (!orig_dev)
2181 return NET_RX_DROP;
2182
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 __get_cpu_var(netdev_rx_stat).total++;
2184
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002185 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002186 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002187 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188
2189 pt_prev = NULL;
2190
2191 rcu_read_lock();
2192
Eric W. Biedermanb9f75f42008-06-20 22:16:51 -07002193 /* Don't receive packets in an exiting network namespace */
2194 if (!net_alive(dev_net(skb->dev)))
2195 goto out;
2196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197#ifdef CONFIG_NET_CLS_ACT
2198 if (skb->tc_verd & TC_NCLS) {
2199 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2200 goto ncls;
2201 }
2202#endif
2203
2204 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2205 if (!ptype->dev || ptype->dev == skb->dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002206 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002207 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 pt_prev = ptype;
2209 }
2210 }
2211
2212#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002213 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2214 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216ncls:
2217#endif
2218
Stephen Hemminger6229e362007-03-21 13:38:47 -07002219 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2220 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002222 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2223 if (!skb)
2224 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225
2226 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002227 list_for_each_entry_rcu(ptype,
2228 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 if (ptype->type == type &&
2230 (!ptype->dev || ptype->dev == skb->dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002231 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002232 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 pt_prev = ptype;
2234 }
2235 }
2236
2237 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002238 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 } else {
2240 kfree_skb(skb);
 2241 /* Jamal, now you will not be able to escape explaining
 2242 * to me how you were going to use this. :-)
2243 */
2244 ret = NET_RX_DROP;
2245 }
2246
2247out:
2248 rcu_read_unlock();
2249 return ret;
2250}
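/*
 * Usage sketch (illustrative): a NAPI driver's ->poll() handler feeds
 * received buffers here, e.g.
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_receive_skb(skb);
 */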
2251
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002252static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253{
2254 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2256 unsigned long start_time = jiffies;
2257
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002258 napi->weight = weight_p;
2259 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 struct sk_buff *skb;
2261 struct net_device *dev;
2262
2263 local_irq_disable();
2264 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002265 if (!skb) {
2266 __napi_complete(napi);
2267 local_irq_enable();
2268 break;
2269 }
2270
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 local_irq_enable();
2272
2273 dev = skb->dev;
2274
2275 netif_receive_skb(skb);
2276
2277 dev_put(dev);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002278 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002280 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281}
2282
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002283/**
2284 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002285 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002286 *
2287 * The entry's receive function will be scheduled to run
2288 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002289void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002290{
2291 unsigned long flags;
2292
2293 local_irq_save(flags);
2294 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2295 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2296 local_irq_restore(flags);
2297}
2298EXPORT_SYMBOL(__napi_schedule);
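/*
 * Usage sketch (illustrative): a driver's interrupt handler normally
 * uses the napi_schedule_prep()/__napi_schedule() pair so that only one
 * CPU polls a given NAPI instance.  Helper names are hypothetical:
 *
 *	if (napi_schedule_prep(&priv->napi)) {
 *		foo_disable_rx_irq(priv);
 *		__napi_schedule(&priv->napi);
 *	}
 */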
2299
2300
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301static void net_rx_action(struct softirq_action *h)
2302{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002303 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 unsigned long start_time = jiffies;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002305 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002306 void *have;
2307
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 local_irq_disable();
2309
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002310 while (!list_empty(list)) {
2311 struct napi_struct *n;
2312 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002314 /* If the softirq window is exhausted then punt.
2315 *
2316 * Note that this is a slight policy change from the
2317 * previous NAPI code, which would allow up to 2
2318 * jiffies to pass before breaking out. The test
2319 * used to be "jiffies - start_time > 1".
2320 */
2321 if (unlikely(budget <= 0 || jiffies != start_time))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 goto softnet_break;
2323
2324 local_irq_enable();
2325
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002326 /* Even though interrupts have been re-enabled, this
2327 * access is safe because interrupts can only add new
2328 * entries to the tail of this list, and only ->poll()
2329 * calls can remove this head entry from the list.
2330 */
2331 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002333 have = netpoll_poll_lock(n);
2334
2335 weight = n->weight;
2336
David S. Miller0a7606c2007-10-29 21:28:47 -07002337 /* This NAPI_STATE_SCHED test is for avoiding a race
2338 * with netpoll's poll_napi(). Only the entity which
2339 * obtains the lock and sees NAPI_STATE_SCHED set will
2340 * actually make the ->poll() call. Therefore we avoid
 2341 * accidentally calling ->poll() when NAPI is not scheduled.
2342 */
2343 work = 0;
2344 if (test_bit(NAPI_STATE_SCHED, &n->state))
2345 work = n->poll(n, weight);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002346
2347 WARN_ON_ONCE(work > weight);
2348
2349 budget -= work;
2350
2351 local_irq_disable();
2352
2353 /* Drivers must not modify the NAPI state if they
2354 * consume the entire weight. In such cases this code
2355 * still "owns" the NAPI instance and therefore can
2356 * move the instance around on the list at-will.
2357 */
David S. Millerfed17f32008-01-07 21:00:40 -08002358 if (unlikely(work == weight)) {
2359 if (unlikely(napi_disable_pending(n)))
2360 __napi_complete(n);
2361 else
2362 list_move_tail(&n->poll_list, list);
2363 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002364
2365 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366 }
2367out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002368 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002369
Chris Leechdb217332006-06-17 21:24:58 -07002370#ifdef CONFIG_NET_DMA
2371 /*
2372 * There may not be any more sk_buffs coming right now, so push
2373 * any pending DMA copies to hardware
2374 */
Dan Williamsd379b012007-07-09 11:56:42 -07002375 if (!cpus_empty(net_dma.channel_mask)) {
2376 int chan_idx;
2377 for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
2378 struct dma_chan *chan = net_dma.channels[chan_idx];
2379 if (chan)
2380 dma_async_memcpy_issue_pending(chan);
2381 }
Chris Leechdb217332006-06-17 21:24:58 -07002382 }
2383#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002384
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 return;
2386
2387softnet_break:
2388 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2389 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2390 goto out;
2391}
2392
 2393static gifconf_func_t *gifconf_list[NPROTO];
2394
2395/**
2396 * register_gifconf - register a SIOCGIF handler
2397 * @family: Address family
2398 * @gifconf: Function handler
2399 *
2400 * Register protocol dependent address dumping routines. The handler
2401 * that is passed must not be freed or reused until it has been replaced
2402 * by another handler.
2403 */
 2404int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2405{
2406 if (family >= NPROTO)
2407 return -EINVAL;
2408 gifconf_list[family] = gifconf;
2409 return 0;
2410}
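/*
 * Usage sketch (illustrative): an address family registers its handler
 * once at initialization time; e.g. IPv4 does, roughly (see
 * net/ipv4/devinet.c)
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */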
2411
2412
2413/*
2414 * Map an interface index to its name (SIOCGIFNAME)
2415 */
2416
2417/*
2418 * We need this ioctl for efficient implementation of the
2419 * if_indextoname() function required by the IPv6 API. Without
2420 * it, we would have to search all the interfaces to find a
2421 * match. --pb
2422 */
2423
Eric W. Biederman881d9662007-09-17 11:56:21 -07002424static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425{
2426 struct net_device *dev;
2427 struct ifreq ifr;
2428
2429 /*
2430 * Fetch the caller's info block.
2431 */
2432
2433 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2434 return -EFAULT;
2435
2436 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002437 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 if (!dev) {
2439 read_unlock(&dev_base_lock);
2440 return -ENODEV;
2441 }
2442
2443 strcpy(ifr.ifr_name, dev->name);
2444 read_unlock(&dev_base_lock);
2445
2446 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2447 return -EFAULT;
2448 return 0;
2449}
2450
2451/*
2452 * Perform a SIOCGIFCONF call. This structure will change
2453 * size eventually, and there is nothing I can do about it.
2454 * Thus we will need a 'compatibility mode'.
2455 */
2456
Eric W. Biederman881d9662007-09-17 11:56:21 -07002457static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458{
2459 struct ifconf ifc;
2460 struct net_device *dev;
2461 char __user *pos;
2462 int len;
2463 int total;
2464 int i;
2465
2466 /*
2467 * Fetch the caller's info block.
2468 */
2469
2470 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2471 return -EFAULT;
2472
2473 pos = ifc.ifc_buf;
2474 len = ifc.ifc_len;
2475
2476 /*
2477 * Loop over the interfaces, and write an info block for each.
2478 */
2479
2480 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002481 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 for (i = 0; i < NPROTO; i++) {
2483 if (gifconf_list[i]) {
2484 int done;
2485 if (!pos)
2486 done = gifconf_list[i](dev, NULL, 0);
2487 else
2488 done = gifconf_list[i](dev, pos + total,
2489 len - total);
2490 if (done < 0)
2491 return -EFAULT;
2492 total += done;
2493 }
2494 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496
2497 /*
2498 * All done. Write the updated control block back to the caller.
2499 */
2500 ifc.ifc_len = total;
2501
2502 /*
2503 * Both BSD and Solaris return 0 here, so we do too.
2504 */
2505 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2506}
2507
2508#ifdef CONFIG_PROC_FS
2509/*
2510 * This is invoked by the /proc filesystem handler to display a device
2511 * in detail.
2512 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002514 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515{
Denis V. Luneve372c412007-11-19 22:31:54 -08002516 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002517 loff_t off;
2518 struct net_device *dev;
2519
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002521 if (!*pos)
2522 return SEQ_START_TOKEN;
2523
2524 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002525 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002526 if (off++ == *pos)
2527 return dev;
2528
2529 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530}
2531
2532void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2533{
Denis V. Luneve372c412007-11-19 22:31:54 -08002534 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002536 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002537 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538}
2539
2540void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002541 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542{
2543 read_unlock(&dev_base_lock);
2544}
2545
2546static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2547{
Rusty Russellc45d2862007-03-28 14:29:08 -07002548 struct net_device_stats *stats = dev->get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549
Rusty Russell5a1b5892007-04-28 21:04:03 -07002550 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2551 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2552 dev->name, stats->rx_bytes, stats->rx_packets,
2553 stats->rx_errors,
2554 stats->rx_dropped + stats->rx_missed_errors,
2555 stats->rx_fifo_errors,
2556 stats->rx_length_errors + stats->rx_over_errors +
2557 stats->rx_crc_errors + stats->rx_frame_errors,
2558 stats->rx_compressed, stats->multicast,
2559 stats->tx_bytes, stats->tx_packets,
2560 stats->tx_errors, stats->tx_dropped,
2561 stats->tx_fifo_errors, stats->collisions,
2562 stats->tx_carrier_errors +
2563 stats->tx_aborted_errors +
2564 stats->tx_window_errors +
2565 stats->tx_heartbeat_errors,
2566 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567}
2568
2569/*
2570 * Called from the PROCfs module. This now uses the new arbitrary sized
2571 * /proc/net interface to create /proc/net/dev
2572 */
2573static int dev_seq_show(struct seq_file *seq, void *v)
2574{
2575 if (v == SEQ_START_TOKEN)
2576 seq_puts(seq, "Inter-| Receive "
2577 " | Transmit\n"
2578 " face |bytes packets errs drop fifo frame "
2579 "compressed multicast|bytes packets errs "
2580 "drop fifo colls carrier compressed\n");
2581 else
2582 dev_seq_printf_stats(seq, v);
2583 return 0;
2584}
2585
2586static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2587{
2588 struct netif_rx_stats *rc = NULL;
2589
Mike Travis0c0b0ac2008-05-02 16:43:08 -07002590 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002591 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 rc = &per_cpu(netdev_rx_stat, *pos);
2593 break;
2594 } else
2595 ++*pos;
2596 return rc;
2597}
2598
2599static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2600{
2601 return softnet_get_online(pos);
2602}
2603
2604static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2605{
2606 ++*pos;
2607 return softnet_get_online(pos);
2608}
2609
2610static void softnet_seq_stop(struct seq_file *seq, void *v)
2611{
2612}
2613
2614static int softnet_seq_show(struct seq_file *seq, void *v)
2615{
2616 struct netif_rx_stats *s = v;
2617
2618 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07002619 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07002620 0, 0, 0, 0, /* was fastroute */
 2621 s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622 return 0;
2623}
2624
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002625static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 .start = dev_seq_start,
2627 .next = dev_seq_next,
2628 .stop = dev_seq_stop,
2629 .show = dev_seq_show,
2630};
2631
2632static int dev_seq_open(struct inode *inode, struct file *file)
2633{
Denis V. Luneve372c412007-11-19 22:31:54 -08002634 return seq_open_net(inode, file, &dev_seq_ops,
2635 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636}
2637
Arjan van de Ven9a321442007-02-12 00:55:35 -08002638static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 .owner = THIS_MODULE,
2640 .open = dev_seq_open,
2641 .read = seq_read,
2642 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08002643 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644};
2645
Stephen Hemmingerf6908082007-03-12 14:34:29 -07002646static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 .start = softnet_seq_start,
2648 .next = softnet_seq_next,
2649 .stop = softnet_seq_stop,
2650 .show = softnet_seq_show,
2651};
2652
2653static int softnet_seq_open(struct inode *inode, struct file *file)
2654{
2655 return seq_open(file, &softnet_seq_ops);
2656}
2657
Arjan van de Ven9a321442007-02-12 00:55:35 -08002658static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659 .owner = THIS_MODULE,
2660 .open = softnet_seq_open,
2661 .read = seq_read,
2662 .llseek = seq_lseek,
2663 .release = seq_release,
2664};
2665
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002666static void *ptype_get_idx(loff_t pos)
2667{
2668 struct packet_type *pt = NULL;
2669 loff_t i = 0;
2670 int t;
2671
2672 list_for_each_entry_rcu(pt, &ptype_all, list) {
2673 if (i == pos)
2674 return pt;
2675 ++i;
2676 }
2677
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002678 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002679 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2680 if (i == pos)
2681 return pt;
2682 ++i;
2683 }
2684 }
2685 return NULL;
2686}
2687
2688static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002689 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002690{
2691 rcu_read_lock();
2692 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2693}
2694
2695static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2696{
2697 struct packet_type *pt;
2698 struct list_head *nxt;
2699 int hash;
2700
2701 ++*pos;
2702 if (v == SEQ_START_TOKEN)
2703 return ptype_get_idx(0);
2704
2705 pt = v;
2706 nxt = pt->list.next;
2707 if (pt->type == htons(ETH_P_ALL)) {
2708 if (nxt != &ptype_all)
2709 goto found;
2710 hash = 0;
2711 nxt = ptype_base[0].next;
2712 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002713 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002714
2715 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002716 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002717 return NULL;
2718 nxt = ptype_base[hash].next;
2719 }
2720found:
2721 return list_entry(nxt, struct packet_type, list);
2722}
2723
2724static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08002725 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002726{
2727 rcu_read_unlock();
2728}
2729
2730static void ptype_seq_decode(struct seq_file *seq, void *sym)
2731{
2732#ifdef CONFIG_KALLSYMS
2733 unsigned long offset = 0, symsize;
2734 const char *symname;
2735 char *modname;
2736 char namebuf[128];
2737
2738 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2739 &modname, namebuf);
2740
2741 if (symname) {
2742 char *delim = ":";
2743
2744 if (!modname)
2745 modname = delim = "";
2746 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2747 symname, offset);
2748 return;
2749 }
2750#endif
2751
2752 seq_printf(seq, "[%p]", sym);
2753}
2754
2755static int ptype_seq_show(struct seq_file *seq, void *v)
2756{
2757 struct packet_type *pt = v;
2758
2759 if (v == SEQ_START_TOKEN)
2760 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002761 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002762 if (pt->type == htons(ETH_P_ALL))
2763 seq_puts(seq, "ALL ");
2764 else
2765 seq_printf(seq, "%04x", ntohs(pt->type));
2766
2767 seq_printf(seq, " %-8s ",
2768 pt->dev ? pt->dev->name : "");
2769 ptype_seq_decode(seq, pt->func);
2770 seq_putc(seq, '\n');
2771 }
2772
2773 return 0;
2774}
2775
2776static const struct seq_operations ptype_seq_ops = {
2777 .start = ptype_seq_start,
2778 .next = ptype_seq_next,
2779 .stop = ptype_seq_stop,
2780 .show = ptype_seq_show,
2781};
2782
2783static int ptype_seq_open(struct inode *inode, struct file *file)
2784{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002785 return seq_open_net(inode, file, &ptype_seq_ops,
2786 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002787}
2788
2789static const struct file_operations ptype_seq_fops = {
2790 .owner = THIS_MODULE,
2791 .open = ptype_seq_open,
2792 .read = seq_read,
2793 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07002794 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002795};
2796
2797
Pavel Emelyanov46650792007-10-08 20:38:39 -07002798static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799{
2800 int rc = -ENOMEM;
2801
Eric W. Biederman881d9662007-09-17 11:56:21 -07002802 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002804 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002806 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002807 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07002808
Eric W. Biederman881d9662007-09-17 11:56:21 -07002809 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002810 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 rc = 0;
2812out:
2813 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02002814out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002815 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002817 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07002819 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 goto out;
2821}
Eric W. Biederman881d9662007-09-17 11:56:21 -07002822
Pavel Emelyanov46650792007-10-08 20:38:39 -07002823static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07002824{
2825 wext_proc_exit(net);
2826
2827 proc_net_remove(net, "ptype");
2828 proc_net_remove(net, "softnet_stat");
2829 proc_net_remove(net, "dev");
2830}
2831
Denis V. Lunev022cbae2007-11-13 03:23:50 -08002832static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07002833 .init = dev_proc_net_init,
2834 .exit = dev_proc_net_exit,
2835};
2836
2837static int __init dev_proc_init(void)
2838{
2839 return register_pernet_subsys(&dev_proc_ops);
2840}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841#else
2842#define dev_proc_init() 0
2843#endif /* CONFIG_PROC_FS */
2844
2845
2846/**
2847 * netdev_set_master - set up master/slave pair
2848 * @slave: slave device
2849 * @master: new master device
2850 *
2851 * Changes the master device of the slave. Pass %NULL to break the
2852 * bonding. The caller must hold the RTNL semaphore. On a failure
2853 * a negative errno code is returned. On success the reference counts
2854 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2855 * function returns zero.
2856 */
2857int netdev_set_master(struct net_device *slave, struct net_device *master)
2858{
2859 struct net_device *old = slave->master;
2860
2861 ASSERT_RTNL();
2862
2863 if (master) {
2864 if (old)
2865 return -EBUSY;
2866 dev_hold(master);
2867 }
2868
2869 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002870
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871 synchronize_net();
2872
2873 if (old)
2874 dev_put(old);
2875
2876 if (master)
2877 slave->flags |= IFF_SLAVE;
2878 else
2879 slave->flags &= ~IFF_SLAVE;
2880
2881 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2882 return 0;
2883}
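/*
 * Usage sketch (illustrative): the bonding driver's enslave path calls
 * this under RTNL, roughly
 *
 *	rtnl_lock();
 *	err = netdev_set_master(slave_dev, bond_dev);
 *	...
 *	netdev_set_master(slave_dev, NULL);	(to release the slave)
 *	rtnl_unlock();
 */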
2884
Wang Chendad9b332008-06-18 01:48:28 -07002885static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07002886{
2887 unsigned short old_flags = dev->flags;
2888
Patrick McHardy24023452007-07-14 18:51:31 -07002889 ASSERT_RTNL();
2890
Wang Chendad9b332008-06-18 01:48:28 -07002891 dev->flags |= IFF_PROMISC;
2892 dev->promiscuity += inc;
2893 if (dev->promiscuity == 0) {
2894 /*
2895 * Avoid overflow.
2896 		 * If @inc overflows the counter, revert the change and return an error.
2897 */
2898 if (inc < 0)
2899 dev->flags &= ~IFF_PROMISC;
2900 else {
2901 dev->promiscuity -= inc;
2902 			printk(KERN_WARNING "%s: promiscuity counter overflowed, "
2903 			       "set promiscuity failed; promiscuous mode on this "
2904 			       "device may be unreliable.\n", dev->name);
2905 return -EOVERFLOW;
2906 }
2907 }
Patrick McHardy4417da62007-06-27 01:28:10 -07002908 if (dev->flags != old_flags) {
2909 printk(KERN_INFO "device %s %s promiscuous mode\n",
2910 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2911 "left");
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05002912 if (audit_enabled)
2913 audit_log(current->audit_context, GFP_ATOMIC,
2914 AUDIT_ANOM_PROMISCUOUS,
2915 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2916 dev->name, (dev->flags & IFF_PROMISC),
2917 (old_flags & IFF_PROMISC),
2918 audit_get_loginuid(current),
2919 current->uid, current->gid,
2920 audit_get_sessionid(current));
Patrick McHardy24023452007-07-14 18:51:31 -07002921
2922 if (dev->change_rx_flags)
2923 dev->change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07002924 }
Wang Chendad9b332008-06-18 01:48:28 -07002925 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07002926}
2927
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928/**
2929 * dev_set_promiscuity - update promiscuity count on a device
2930 * @dev: device
2931 * @inc: modifier
2932 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07002933 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934 * remains above zero the interface remains promiscuous. Once it hits zero
2935 * the device reverts to normal filtering operation. A negative @inc
2936 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07002937 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 */
Wang Chendad9b332008-06-18 01:48:28 -07002939int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940{
2941 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07002942 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943
Wang Chendad9b332008-06-18 01:48:28 -07002944 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07002945 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07002946 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07002947 if (dev->flags != old_flags)
2948 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07002949 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950}
2951
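/*
 * Sketch of balanced promiscuity accounting from a hypothetical
 * packet-capture component: every +1 must eventually be matched by a
 * -1, because the interface stays promiscuous while the count is
 * nonzero.  example_capture_*() are made-up names.
 */
static int example_capture_start(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* may return -EOVERFLOW */
}

static void example_capture_stop(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);
}
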
2952/**
2953 * dev_set_allmulti - update allmulti count on a device
2954 * @dev: device
2955 * @inc: modifier
2956 *
2957 * Add or remove reception of all multicast frames on a device. While the
2958 * count in the device remains above zero the interface stays in
2959 * all-multicast mode. Once it hits zero the device reverts to normal
2960 * filtering operation. A negative @inc value is used to drop the counter
2961 * when releasing a resource that needed all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07002962 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 */
2964
Wang Chendad9b332008-06-18 01:48:28 -07002965int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966{
2967 unsigned short old_flags = dev->flags;
2968
Patrick McHardy24023452007-07-14 18:51:31 -07002969 ASSERT_RTNL();
2970
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07002972 dev->allmulti += inc;
2973 if (dev->allmulti == 0) {
2974 /*
2975 * Avoid overflow.
2976 		 * If @inc overflows the counter, revert the change and return an error.
2977 */
2978 if (inc < 0)
2979 dev->flags &= ~IFF_ALLMULTI;
2980 else {
2981 dev->allmulti -= inc;
2982 			printk(KERN_WARNING "%s: allmulti counter overflowed, "
2983 			       "set allmulti failed; allmulti mode on this "
2984 			       "device may be unreliable.\n", dev->name);
2985 return -EOVERFLOW;
2986 }
2987 }
Patrick McHardy24023452007-07-14 18:51:31 -07002988 if (dev->flags ^ old_flags) {
2989 if (dev->change_rx_flags)
2990 dev->change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07002991 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07002992 }
Wang Chendad9b332008-06-18 01:48:28 -07002993 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07002994}
2995
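/*
 * Sketch: a hypothetical multicast routing component taking and
 * releasing an allmulti reference around its use of the device.
 * example_mroute_*() are illustrative names only.
 */
static int example_mroute_attach(struct net_device *dev)
{
	int err = dev_set_allmulti(dev, 1);

	return err < 0 ? err : 0;	/* -EOVERFLOW on counter overflow */
}

static void example_mroute_detach(struct net_device *dev)
{
	dev_set_allmulti(dev, -1);
}
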
2996/*
2997 * Upload the unicast and multicast address lists to the device and
2998 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08002999 * filtering, it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003000 * are present.
3001 */
3002void __dev_set_rx_mode(struct net_device *dev)
3003{
3004 /* dev_open will call this function so the list will stay sane. */
3005 if (!(dev->flags&IFF_UP))
3006 return;
3007
3008 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003009 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003010
3011 if (dev->set_rx_mode)
3012 dev->set_rx_mode(dev);
3013 else {
3014 		/* Unicast address changes may only happen under the rtnl,
3015 * therefore calling __dev_set_promiscuity here is safe.
3016 */
3017 if (dev->uc_count > 0 && !dev->uc_promisc) {
3018 __dev_set_promiscuity(dev, 1);
3019 dev->uc_promisc = 1;
3020 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3021 __dev_set_promiscuity(dev, -1);
3022 dev->uc_promisc = 0;
3023 }
3024
3025 if (dev->set_multicast_list)
3026 dev->set_multicast_list(dev);
3027 }
3028}
3029
3030void dev_set_rx_mode(struct net_device *dev)
3031{
David S. Millerb9e40852008-07-15 00:15:08 -07003032 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003033 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003034 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035}
3036
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003037int __dev_addr_delete(struct dev_addr_list **list, int *count,
3038 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003039{
3040 struct dev_addr_list *da;
3041
3042 for (; (da = *list) != NULL; list = &da->next) {
3043 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3044 alen == da->da_addrlen) {
3045 if (glbl) {
3046 int old_glbl = da->da_gusers;
3047 da->da_gusers = 0;
3048 if (old_glbl == 0)
3049 break;
3050 }
3051 if (--da->da_users)
3052 return 0;
3053
3054 *list = da->next;
3055 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003056 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003057 return 0;
3058 }
3059 }
3060 return -ENOENT;
3061}
3062
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003063int __dev_addr_add(struct dev_addr_list **list, int *count,
3064 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003065{
3066 struct dev_addr_list *da;
3067
3068 for (da = *list; da != NULL; da = da->next) {
3069 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3070 da->da_addrlen == alen) {
3071 if (glbl) {
3072 int old_glbl = da->da_gusers;
3073 da->da_gusers = 1;
3074 if (old_glbl)
3075 return 0;
3076 }
3077 da->da_users++;
3078 return 0;
3079 }
3080 }
3081
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003082 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003083 if (da == NULL)
3084 return -ENOMEM;
3085 memcpy(da->da_addr, addr, alen);
3086 da->da_addrlen = alen;
3087 da->da_users = 1;
3088 da->da_gusers = glbl ? 1 : 0;
3089 da->next = *list;
3090 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003091 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003092 return 0;
3093}
3094
Patrick McHardy4417da62007-06-27 01:28:10 -07003095/**
3096 * dev_unicast_delete - Release secondary unicast address.
3097 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003098 * @addr: address to delete
3099 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003100 *
3101 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003102 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003103 *
3104 * The caller must hold the rtnl_mutex.
3105 */
3106int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3107{
3108 int err;
3109
3110 ASSERT_RTNL();
3111
David S. Millerb9e40852008-07-15 00:15:08 -07003112 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003113 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3114 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003115 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003116 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003117 return err;
3118}
3119EXPORT_SYMBOL(dev_unicast_delete);
3120
3121/**
3122 * dev_unicast_add - add a secondary unicast address
3123 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003124 * @addr: address to add
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003125 * @alen: length of @addr
Patrick McHardy4417da62007-06-27 01:28:10 -07003126 *
3127 * Add a secondary unicast address to the device or increase
3128 * the reference count if it already exists.
3129 *
3130 * The caller must hold the rtnl_mutex.
3131 */
3132int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3133{
3134 int err;
3135
3136 ASSERT_RTNL();
3137
David S. Millerb9e40852008-07-15 00:15:08 -07003138 netif_addr_lock_bh(dev);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003139 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3140 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003141 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003142 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003143 return err;
3144}
3145EXPORT_SYMBOL(dev_unicast_add);
3146
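/*
 * Sketch: adding and later removing a secondary unicast address, as a
 * hypothetical virtual-interface driver might.  Both helper names are
 * made up; the dev_unicast_*() calls require the rtnl_mutex, per the
 * comments above.
 */
static int example_add_secondary_mac(struct net_device *dev, u8 *mac)
{
	int err;

	rtnl_lock();
	err = dev_unicast_add(dev, mac, dev->addr_len);
	rtnl_unlock();
	return err;
}

static void example_del_secondary_mac(struct net_device *dev, u8 *mac)
{
	rtnl_lock();
	dev_unicast_delete(dev, mac, dev->addr_len);
	rtnl_unlock();
}
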
Chris Leeche83a2ea2008-01-31 16:53:23 -08003147int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3148 struct dev_addr_list **from, int *from_count)
3149{
3150 struct dev_addr_list *da, *next;
3151 int err = 0;
3152
3153 da = *from;
3154 while (da != NULL) {
3155 next = da->next;
3156 if (!da->da_synced) {
3157 err = __dev_addr_add(to, to_count,
3158 da->da_addr, da->da_addrlen, 0);
3159 if (err < 0)
3160 break;
3161 da->da_synced = 1;
3162 da->da_users++;
3163 } else if (da->da_users == 1) {
3164 __dev_addr_delete(to, to_count,
3165 da->da_addr, da->da_addrlen, 0);
3166 __dev_addr_delete(from, from_count,
3167 da->da_addr, da->da_addrlen, 0);
3168 }
3169 da = next;
3170 }
3171 return err;
3172}
3173
3174void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3175 struct dev_addr_list **from, int *from_count)
3176{
3177 struct dev_addr_list *da, *next;
3178
3179 da = *from;
3180 while (da != NULL) {
3181 next = da->next;
3182 if (da->da_synced) {
3183 __dev_addr_delete(to, to_count,
3184 da->da_addr, da->da_addrlen, 0);
3185 da->da_synced = 0;
3186 __dev_addr_delete(from, from_count,
3187 da->da_addr, da->da_addrlen, 0);
3188 }
3189 da = next;
3190 }
3191}
3192
3193/**
3194 * dev_unicast_sync - Synchronize device's unicast list to another device
3195 * @to: destination device
3196 * @from: source device
3197 *
3198 * Add newly added addresses to the destination device and release
3199 * addresses that have no users left. The source device must be
3200 * locked by netif_tx_lock_bh.
3201 * locked by netif_addr_lock_bh.
3202 * This function is intended to be called from the dev->set_rx_mode
3203 * function of layered software devices.
3204 */
3205int dev_unicast_sync(struct net_device *to, struct net_device *from)
3206{
3207 int err = 0;
3208
David S. Millerb9e40852008-07-15 00:15:08 -07003209 netif_addr_lock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003210 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3211 &from->uc_list, &from->uc_count);
3212 if (!err)
3213 __dev_set_rx_mode(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003214 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003215 return err;
3216}
3217EXPORT_SYMBOL(dev_unicast_sync);
3218
3219/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003220 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003221 * @to: destination device
3222 * @from: source device
3223 *
3224 * Remove all addresses that were added to the destination device by
3225 * dev_unicast_sync(). This function is intended to be called from the
3226 * dev->stop function of layered software devices.
3227 */
3228void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3229{
David S. Millerb9e40852008-07-15 00:15:08 -07003230 netif_addr_lock_bh(from);
David S. Millere308a5d2008-07-15 00:13:44 -07003231 netif_addr_lock(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003232
3233 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3234 &from->uc_list, &from->uc_count);
3235 __dev_set_rx_mode(to);
3236
David S. Millere308a5d2008-07-15 00:13:44 -07003237 netif_addr_unlock(to);
David S. Millerb9e40852008-07-15 00:15:08 -07003238 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003239}
3240EXPORT_SYMBOL(dev_unicast_unsync);
3241
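/*
 * Sketch of the intended calling pattern for dev_unicast_sync() and
 * dev_unicast_unsync() in a hypothetical stacked (VLAN-like) device:
 * sync from the upper device's rx-mode hook, unsync when it stops.
 * Note the argument order: addresses flow from @from to @to.
 */
static void example_upper_set_rx_mode(struct net_device *upper,
				      struct net_device *lower)
{
	dev_unicast_sync(lower, upper);	/* push upper's addresses down */
}

static void example_upper_stop(struct net_device *upper,
			       struct net_device *lower)
{
	dev_unicast_unsync(lower, upper);
}
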
Denis Cheng12972622007-07-18 02:12:56 -07003242static void __dev_addr_discard(struct dev_addr_list **list)
3243{
3244 struct dev_addr_list *tmp;
3245
3246 while (*list != NULL) {
3247 tmp = *list;
3248 *list = tmp->next;
3249 if (tmp->da_users > tmp->da_gusers)
3250 printk("__dev_addr_discard: address leakage! "
3251 "da_users=%d\n", tmp->da_users);
3252 kfree(tmp);
3253 }
3254}
3255
Denis Cheng26cc2522007-07-18 02:12:03 -07003256static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07003257{
David S. Millerb9e40852008-07-15 00:15:08 -07003258 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07003259
Patrick McHardy4417da62007-06-27 01:28:10 -07003260 __dev_addr_discard(&dev->uc_list);
3261 dev->uc_count = 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003262
Denis Cheng456ad752007-07-18 02:10:54 -07003263 __dev_addr_discard(&dev->mc_list);
3264 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07003265
David S. Millerb9e40852008-07-15 00:15:08 -07003266 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07003267}
3268
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269unsigned dev_get_flags(const struct net_device *dev)
3270{
3271 unsigned flags;
3272
3273 flags = (dev->flags & ~(IFF_PROMISC |
3274 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08003275 IFF_RUNNING |
3276 IFF_LOWER_UP |
3277 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278 (dev->gflags & (IFF_PROMISC |
3279 IFF_ALLMULTI));
3280
Stefan Rompfb00055a2006-03-20 17:09:11 -08003281 if (netif_running(dev)) {
3282 if (netif_oper_up(dev))
3283 flags |= IFF_RUNNING;
3284 if (netif_carrier_ok(dev))
3285 flags |= IFF_LOWER_UP;
3286 if (netif_dormant(dev))
3287 flags |= IFF_DORMANT;
3288 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289
3290 return flags;
3291}
3292
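/*
 * Sketch: consuming the composite flag word.  IFF_RUNNING,
 * IFF_LOWER_UP and IFF_DORMANT are synthesized from the operational
 * state above, so they are only meaningful via this accessor.
 * example_link_is_usable() is a made-up name.
 */
static bool example_link_is_usable(const struct net_device *dev)
{
	unsigned flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}
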
3293int dev_change_flags(struct net_device *dev, unsigned flags)
3294{
Thomas Graf7c355f52007-06-05 16:03:03 -07003295 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 int old_flags = dev->flags;
3297
Patrick McHardy24023452007-07-14 18:51:31 -07003298 ASSERT_RTNL();
3299
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300 /*
3301 * Set the flags on our device.
3302 */
3303
3304 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3305 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3306 IFF_AUTOMEDIA)) |
3307 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3308 IFF_ALLMULTI));
3309
3310 /*
3311 * Load in the correct multicast list now the flags have changed.
3312 */
3313
David Woodhouse0e917962008-05-20 14:36:14 -07003314 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
Patrick McHardy24023452007-07-14 18:51:31 -07003315 dev->change_rx_flags(dev, IFF_MULTICAST);
3316
Patrick McHardy4417da62007-06-27 01:28:10 -07003317 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003318
3319 /*
3320 	 * Has the interface been brought down? We handle IFF_UP ourselves
3321 * according to user attempts to set it, rather than blindly
3322 * setting it.
3323 */
3324
3325 ret = 0;
3326 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3327 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3328
3329 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07003330 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331 }
3332
3333 if (dev->flags & IFF_UP &&
3334 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3335 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003336 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337
3338 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3339 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3340 dev->gflags ^= IFF_PROMISC;
3341 dev_set_promiscuity(dev, inc);
3342 }
3343
3344 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3345 	   is important. Some (broken) drivers set IFF_PROMISC when
3346 	   IFF_ALLMULTI is requested, without asking us and without reporting it.
3347 */
3348 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3349 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3350 dev->gflags ^= IFF_ALLMULTI;
3351 dev_set_allmulti(dev, inc);
3352 }
3353
Thomas Graf7c355f52007-06-05 16:03:03 -07003354 /* Exclude state transition flags, already notified */
3355 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3356 if (changes)
3357 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358
3359 return ret;
3360}
3361
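/*
 * Sketch: bringing an interface up by flag manipulation, the same way
 * the SIOCSIFFLAGS path below does.  dev_change_flags() asserts the
 * RTNL, so the hypothetical caller takes it here.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}
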
3362int dev_set_mtu(struct net_device *dev, int new_mtu)
3363{
3364 int err;
3365
3366 if (new_mtu == dev->mtu)
3367 return 0;
3368
3369 /* MTU must be positive. */
3370 if (new_mtu < 0)
3371 return -EINVAL;
3372
3373 if (!netif_device_present(dev))
3374 return -ENODEV;
3375
3376 err = 0;
3377 if (dev->change_mtu)
3378 err = dev->change_mtu(dev, new_mtu);
3379 else
3380 dev->mtu = new_mtu;
3381 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003382 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003383 return err;
3384}
3385
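/*
 * Sketch: validating and applying a new MTU from a hypothetical
 * management path.  On success, NETDEV_CHANGEMTU is broadcast above
 * when the device is up.  The 68-byte floor is the conventional IPv4
 * minimum, not something dev_set_mtu() itself enforces.
 */
static int example_apply_mtu(struct net_device *dev, int mtu)
{
	if (mtu < 68)
		return -EINVAL;
	return dev_set_mtu(dev, mtu);
}
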
3386int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3387{
3388 int err;
3389
3390 if (!dev->set_mac_address)
3391 return -EOPNOTSUPP;
3392 if (sa->sa_family != dev->type)
3393 return -EINVAL;
3394 if (!netif_device_present(dev))
3395 return -ENODEV;
3396 err = dev->set_mac_address(dev, sa);
3397 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003398 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399 return err;
3400}
3401
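/*
 * Sketch: programming a new hardware address.  The sockaddr family
 * must match dev->type (e.g. ARPHRD_ETHER), mirroring the check in
 * dev_set_mac_address() above.  example_set_mac() is a made-up name.
 */
static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}
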
3402/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07003403 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003404 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07003405static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406{
3407 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003408 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409
3410 if (!dev)
3411 return -ENODEV;
3412
3413 switch (cmd) {
3414 case SIOCGIFFLAGS: /* Get interface flags */
3415 ifr->ifr_flags = dev_get_flags(dev);
3416 return 0;
3417
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418 case SIOCGIFMETRIC: /* Get the metric on the interface
3419 (currently unused) */
3420 ifr->ifr_metric = 0;
3421 return 0;
3422
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423 case SIOCGIFMTU: /* Get the MTU of a device */
3424 ifr->ifr_mtu = dev->mtu;
3425 return 0;
3426
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427 case SIOCGIFHWADDR:
3428 if (!dev->addr_len)
3429 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3430 else
3431 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3432 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3433 ifr->ifr_hwaddr.sa_family = dev->type;
3434 return 0;
3435
Jeff Garzik14e3e072007-10-08 00:06:32 -07003436 case SIOCGIFSLAVE:
3437 err = -EINVAL;
3438 break;
3439
3440 case SIOCGIFMAP:
3441 ifr->ifr_map.mem_start = dev->mem_start;
3442 ifr->ifr_map.mem_end = dev->mem_end;
3443 ifr->ifr_map.base_addr = dev->base_addr;
3444 ifr->ifr_map.irq = dev->irq;
3445 ifr->ifr_map.dma = dev->dma;
3446 ifr->ifr_map.port = dev->if_port;
3447 return 0;
3448
3449 case SIOCGIFINDEX:
3450 ifr->ifr_ifindex = dev->ifindex;
3451 return 0;
3452
3453 case SIOCGIFTXQLEN:
3454 ifr->ifr_qlen = dev->tx_queue_len;
3455 return 0;
3456
3457 default:
3458 /* dev_ioctl() should ensure this case
3459 * is never reached
3460 */
3461 WARN_ON(1);
3462 err = -EINVAL;
3463 break;
3464
3465 }
3466 return err;
3467}
3468
3469/*
3470 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3471 */
3472static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3473{
3474 int err;
3475 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3476
3477 if (!dev)
3478 return -ENODEV;
3479
3480 switch (cmd) {
3481 case SIOCSIFFLAGS: /* Set interface flags */
3482 return dev_change_flags(dev, ifr->ifr_flags);
3483
3484 case SIOCSIFMETRIC: /* Set the metric on the interface
3485 (currently unused) */
3486 return -EOPNOTSUPP;
3487
3488 case SIOCSIFMTU: /* Set the MTU of a device */
3489 return dev_set_mtu(dev, ifr->ifr_mtu);
3490
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491 case SIOCSIFHWADDR:
3492 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3493
3494 case SIOCSIFHWBROADCAST:
3495 if (ifr->ifr_hwaddr.sa_family != dev->type)
3496 return -EINVAL;
3497 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3498 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003499 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500 return 0;
3501
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502 case SIOCSIFMAP:
3503 if (dev->set_config) {
3504 if (!netif_device_present(dev))
3505 return -ENODEV;
3506 return dev->set_config(dev, &ifr->ifr_map);
3507 }
3508 return -EOPNOTSUPP;
3509
3510 case SIOCADDMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003511 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3513 return -EINVAL;
3514 if (!netif_device_present(dev))
3515 return -ENODEV;
3516 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3517 dev->addr_len, 1);
3518
3519 case SIOCDELMULTI:
Patrick McHardy61ee6bd2008-03-26 02:12:11 -07003520 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3522 return -EINVAL;
3523 if (!netif_device_present(dev))
3524 return -ENODEV;
3525 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3526 dev->addr_len, 1);
3527
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528 case SIOCSIFTXQLEN:
3529 if (ifr->ifr_qlen < 0)
3530 return -EINVAL;
3531 dev->tx_queue_len = ifr->ifr_qlen;
3532 return 0;
3533
3534 case SIOCSIFNAME:
3535 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3536 return dev_change_name(dev, ifr->ifr_newname);
3537
3538 /*
3539 * Unknown or private ioctl
3540 */
3541
3542 default:
3543 if ((cmd >= SIOCDEVPRIVATE &&
3544 cmd <= SIOCDEVPRIVATE + 15) ||
3545 cmd == SIOCBONDENSLAVE ||
3546 cmd == SIOCBONDRELEASE ||
3547 cmd == SIOCBONDSETHWADDR ||
3548 cmd == SIOCBONDSLAVEINFOQUERY ||
3549 cmd == SIOCBONDINFOQUERY ||
3550 cmd == SIOCBONDCHANGEACTIVE ||
3551 cmd == SIOCGMIIPHY ||
3552 cmd == SIOCGMIIREG ||
3553 cmd == SIOCSMIIREG ||
3554 cmd == SIOCBRADDIF ||
3555 cmd == SIOCBRDELIF ||
3556 cmd == SIOCWANDEV) {
3557 err = -EOPNOTSUPP;
3558 if (dev->do_ioctl) {
3559 if (netif_device_present(dev))
3560 err = dev->do_ioctl(dev, ifr,
3561 cmd);
3562 else
3563 err = -ENODEV;
3564 }
3565 } else
3566 err = -EINVAL;
3567
3568 }
3569 return err;
3570}
3571
3572/*
3573 * This function handles all "interface"-type I/O control requests. The actual
3574 * 'doing' part of this is dev_ifsioc above.
3575 */
3576
3577/**
3578 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003579 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580 * @cmd: command to issue
3581 * @arg: pointer to a struct ifreq in user space
3582 *
3583 * Issue ioctl functions to devices. This is normally called by the
3584 * user space syscall interfaces but can sometimes be useful for
3585 * other purposes. The return value is the return from the syscall if
3586 * positive or a negative errno code on error.
3587 */
3588
Eric W. Biederman881d9662007-09-17 11:56:21 -07003589int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590{
3591 struct ifreq ifr;
3592 int ret;
3593 char *colon;
3594
3595 	/* One special case: SIOCGIFCONF takes an ifconf argument
3596 	   and requires the shared lock, because it sleeps while writing
3597 	   to user space.
3598 */
3599
3600 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003601 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003602 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08003603 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604 return ret;
3605 }
3606 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003607 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608
3609 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3610 return -EFAULT;
3611
3612 ifr.ifr_name[IFNAMSIZ-1] = 0;
3613
3614 colon = strchr(ifr.ifr_name, ':');
3615 if (colon)
3616 *colon = 0;
3617
3618 /*
3619 * See which interface the caller is talking about.
3620 */
3621
3622 switch (cmd) {
3623 /*
3624 * These ioctl calls:
3625 * - can be done by all.
3626 * - atomic and do not require locking.
3627 * - return a value
3628 */
3629 case SIOCGIFFLAGS:
3630 case SIOCGIFMETRIC:
3631 case SIOCGIFMTU:
3632 case SIOCGIFHWADDR:
3633 case SIOCGIFSLAVE:
3634 case SIOCGIFMAP:
3635 case SIOCGIFINDEX:
3636 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003637 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003638 read_lock(&dev_base_lock);
Jeff Garzik14e3e072007-10-08 00:06:32 -07003639 ret = dev_ifsioc_locked(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640 read_unlock(&dev_base_lock);
3641 if (!ret) {
3642 if (colon)
3643 *colon = ':';
3644 if (copy_to_user(arg, &ifr,
3645 sizeof(struct ifreq)))
3646 ret = -EFAULT;
3647 }
3648 return ret;
3649
3650 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003651 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003652 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003653 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654 rtnl_unlock();
3655 if (!ret) {
3656 if (colon)
3657 *colon = ':';
3658 if (copy_to_user(arg, &ifr,
3659 sizeof(struct ifreq)))
3660 ret = -EFAULT;
3661 }
3662 return ret;
3663
3664 /*
3665 * These ioctl calls:
3666 * - require superuser power.
3667 * - require strict serialization.
3668 * - return a value
3669 */
3670 case SIOCGMIIPHY:
3671 case SIOCGMIIREG:
3672 case SIOCSIFNAME:
3673 if (!capable(CAP_NET_ADMIN))
3674 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003675 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003676 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003677 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003678 rtnl_unlock();
3679 if (!ret) {
3680 if (colon)
3681 *colon = ':';
3682 if (copy_to_user(arg, &ifr,
3683 sizeof(struct ifreq)))
3684 ret = -EFAULT;
3685 }
3686 return ret;
3687
3688 /*
3689 * These ioctl calls:
3690 * - require superuser power.
3691 * - require strict serialization.
3692 * - do not return a value
3693 */
3694 case SIOCSIFFLAGS:
3695 case SIOCSIFMETRIC:
3696 case SIOCSIFMTU:
3697 case SIOCSIFMAP:
3698 case SIOCSIFHWADDR:
3699 case SIOCSIFSLAVE:
3700 case SIOCADDMULTI:
3701 case SIOCDELMULTI:
3702 case SIOCSIFHWBROADCAST:
3703 case SIOCSIFTXQLEN:
3704 case SIOCSMIIREG:
3705 case SIOCBONDENSLAVE:
3706 case SIOCBONDRELEASE:
3707 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708 case SIOCBONDCHANGEACTIVE:
3709 case SIOCBRADDIF:
3710 case SIOCBRDELIF:
3711 if (!capable(CAP_NET_ADMIN))
3712 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08003713 /* fall through */
3714 case SIOCBONDSLAVEINFOQUERY:
3715 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003716 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003718 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 rtnl_unlock();
3720 return ret;
3721
3722 case SIOCGIFMEM:
3723 /* Get the per device memory space. We can add this but
3724 * currently do not support it */
3725 case SIOCSIFMEM:
3726 /* Set the per device memory buffer space.
3727 * Not applicable in our case */
3728 case SIOCSIFLINK:
3729 return -EINVAL;
3730
3731 /*
3732 * Unknown or private ioctl.
3733 */
3734 default:
3735 if (cmd == SIOCWANDEV ||
3736 (cmd >= SIOCDEVPRIVATE &&
3737 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003738 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07003740 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 rtnl_unlock();
3742 if (!ret && copy_to_user(arg, &ifr,
3743 sizeof(struct ifreq)))
3744 ret = -EFAULT;
3745 return ret;
3746 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07003748 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003749 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750 return -EINVAL;
3751 }
3752}
3753
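/*
 * Sketch of the user-space side of the SIOCGIFFLAGS path serviced by
 * dev_ioctl() above (a standalone program, shown as a comment since
 * it is not kernel code):
 *
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *
 *	int get_flags(int sock, const char *name, short *flags)
 *	{
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		if (ioctl(sock, SIOCGIFFLAGS, &ifr) < 0)
 *			return -1;
 *		*flags = ifr.ifr_flags;
 *		return 0;
 *	}
 */
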
3754
3755/**
3756 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07003757 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758 *
3759 * Returns a suitable unique value for a new device interface
3760 * number. The caller must hold the rtnl semaphore or the
3761 * dev_base_lock to be sure it remains unique.
3762 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003763static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764{
3765 static int ifindex;
3766 for (;;) {
3767 if (++ifindex <= 0)
3768 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003769 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 return ifindex;
3771 }
3772}
3773
Linus Torvalds1da177e2005-04-16 15:20:36 -07003774/* Delayed registration/unregistration */
3775static DEFINE_SPINLOCK(net_todo_list_lock);
Denis Cheng3b5b34f2007-12-07 00:49:17 -08003776static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777
Stephen Hemminger6f05f622007-03-08 20:46:03 -08003778static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003779{
3780 spin_lock(&net_todo_list_lock);
3781 list_add_tail(&dev->todo_list, &net_todo_list);
3782 spin_unlock(&net_todo_list_lock);
3783}
3784
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003785static void rollback_registered(struct net_device *dev)
3786{
3787 BUG_ON(dev_boot_phase);
3788 ASSERT_RTNL();
3789
3790 	/* Some devices call this without ever having registered, to unwind initialization. */
3791 if (dev->reg_state == NETREG_UNINITIALIZED) {
3792 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3793 "was registered\n", dev->name, dev);
3794
3795 WARN_ON(1);
3796 return;
3797 }
3798
3799 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3800
3801 /* If device is running, close it first. */
3802 dev_close(dev);
3803
3804 /* And unlink it from device chain. */
3805 unlist_netdevice(dev);
3806
3807 dev->reg_state = NETREG_UNREGISTERING;
3808
3809 synchronize_net();
3810
3811 /* Shutdown queueing discipline. */
3812 dev_shutdown(dev);
3813
3814
3815 	/* Notify protocols that we are about to destroy
3816 	   this device. They should clean up all their state.
3817 */
3818 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3819
3820 /*
3821 * Flush the unicast and multicast chains
3822 */
3823 dev_addr_discard(dev);
3824
3825 if (dev->uninit)
3826 dev->uninit(dev);
3827
3828 /* Notifier chain MUST detach us from master device. */
3829 BUG_TRAP(!dev->master);
3830
3831 /* Remove entries from kobject tree */
3832 netdev_unregister_kobject(dev);
3833
3834 synchronize_net();
3835
3836 dev_put(dev);
3837}
3838
David S. Millere8a04642008-07-17 00:34:19 -07003839static void __netdev_init_queue_locks_one(struct net_device *dev,
3840 struct netdev_queue *dev_queue,
3841 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07003842{
3843 spin_lock_init(&dev_queue->_xmit_lock);
3844 netdev_set_lockdep_class(&dev_queue->_xmit_lock, dev->type);
3845 dev_queue->xmit_lock_owner = -1;
3846}
3847
3848static void netdev_init_queue_locks(struct net_device *dev)
3849{
David S. Millere8a04642008-07-17 00:34:19 -07003850 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3851 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07003852}
3853
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854/**
3855 * register_netdevice - register a network device
3856 * @dev: device to register
3857 *
3858 * Take a completed network device structure and add it to the kernel
3859 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3860 * chain. 0 is returned on success. A negative errno code is returned
3861 * on a failure to set up the device, or if the name is a duplicate.
3862 *
3863 * Callers must hold the rtnl semaphore. You may want
3864 * register_netdev() instead of this.
3865 *
3866 * BUGS:
3867 * The locking appears insufficient to guarantee two parallel registers
3868 * will not get the same name.
3869 */
3870
3871int register_netdevice(struct net_device *dev)
3872{
3873 struct hlist_head *head;
3874 struct hlist_node *p;
3875 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003876 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877
3878 BUG_ON(dev_boot_phase);
3879 ASSERT_RTNL();
3880
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003881 might_sleep();
3882
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883 /* When net_device's are persistent, this will be fatal. */
3884 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003885 BUG_ON(!dev_net(dev));
3886 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003887
David S. Millerf1f28aa2008-07-15 00:08:33 -07003888 spin_lock_init(&dev->addr_list_lock);
David S. Millerc773e842008-07-08 23:13:53 -07003889 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891 dev->iflink = -1;
3892
3893 /* Init, if this function is available */
3894 if (dev->init) {
3895 ret = dev->init(dev);
3896 if (ret) {
3897 if (ret > 0)
3898 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08003899 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900 }
3901 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003902
Linus Torvalds1da177e2005-04-16 15:20:36 -07003903 if (!dev_valid_name(dev->name)) {
3904 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003905 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906 }
3907
Eric W. Biederman881d9662007-09-17 11:56:21 -07003908 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003909 if (dev->iflink == -1)
3910 dev->iflink = dev->ifindex;
3911
3912 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07003913 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003914 hlist_for_each(p, head) {
3915 struct net_device *d
3916 = hlist_entry(p, struct net_device, name_hlist);
3917 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3918 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003919 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003921 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003922
Stephen Hemmingerd212f872007-06-27 00:47:37 -07003923 /* Fix illegal checksum combinations */
3924 if ((dev->features & NETIF_F_HW_CSUM) &&
3925 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3926 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3927 dev->name);
3928 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3929 }
3930
3931 if ((dev->features & NETIF_F_NO_CSUM) &&
3932 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3933 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3934 dev->name);
3935 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3936 }
3937
3938
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939 /* Fix illegal SG+CSUM combinations. */
3940 if ((dev->features & NETIF_F_SG) &&
Herbert Xu8648b302006-06-17 22:06:05 -07003941 !(dev->features & NETIF_F_ALL_CSUM)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003942 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003943 dev->name);
3944 dev->features &= ~NETIF_F_SG;
3945 }
3946
3947 /* TSO requires that SG is present as well. */
3948 if ((dev->features & NETIF_F_TSO) &&
3949 !(dev->features & NETIF_F_SG)) {
Stephen Hemminger5a8da022006-07-07 16:54:05 -07003950 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 dev->name);
3952 dev->features &= ~NETIF_F_TSO;
3953 }
Ananda Rajue89e9cf2005-10-18 15:46:41 -07003954 if (dev->features & NETIF_F_UFO) {
3955 if (!(dev->features & NETIF_F_HW_CSUM)) {
3956 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3957 "NETIF_F_HW_CSUM feature.\n",
3958 dev->name);
3959 dev->features &= ~NETIF_F_UFO;
3960 }
3961 if (!(dev->features & NETIF_F_SG)) {
3962 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3963 "NETIF_F_SG feature.\n",
3964 dev->name);
3965 dev->features &= ~NETIF_F_UFO;
3966 }
3967 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07003969 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07003970 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003971 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003972 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07003973 dev->reg_state = NETREG_REGISTERED;
3974
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975 /*
3976 * Default initial state at registry is that the
3977 * device is present.
3978 */
3979
3980 set_bit(__LINK_STATE_PRESENT, &dev->state);
3981
Linus Torvalds1da177e2005-04-16 15:20:36 -07003982 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003983 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02003984 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003985
3986 	/* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07003987 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07003988 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07003989 if (ret) {
3990 rollback_registered(dev);
3991 dev->reg_state = NETREG_UNREGISTERED;
3992 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993
3994out:
3995 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07003996
3997err_uninit:
3998 if (dev->uninit)
3999 dev->uninit(dev);
4000 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004001}
4002
4003/**
4004 * register_netdev - register a network device
4005 * @dev: device to register
4006 *
4007 * Take a completed network device structure and add it to the kernel
4008 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4009 * chain. 0 is returned on success. A negative errno code is returned
4010 * on a failure to set up the device, or if the name is a duplicate.
4011 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004012 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004013 * and expands the device name if you passed a format string to
4014 * alloc_netdev.
4015 */
4016int register_netdev(struct net_device *dev)
4017{
4018 int err;
4019
4020 rtnl_lock();
4021
4022 /*
4023 * If the name is a format string the caller wants us to do a
4024 * name allocation.
4025 */
4026 if (strchr(dev->name, '%')) {
4027 err = dev_alloc_name(dev, dev->name);
4028 if (err < 0)
4029 goto out;
4030 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004031
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032 err = register_netdevice(dev);
4033out:
4034 rtnl_unlock();
4035 return err;
4036}
4037EXPORT_SYMBOL(register_netdev);
4038
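/*
 * Sketch: the canonical allocate-then-register sequence for a
 * hypothetical Ethernet-style driver.  alloc_netdev(), ether_setup()
 * and free_netdev() are real APIs; example_probe() is not.  The "%d"
 * in the name asks register_netdev() to pick a unit number.
 */
static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(0, "example%d", ether_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);	/* never registered, free directly */
	return err;
}
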
4039/*
4040 * netdev_wait_allrefs - wait until all references are gone.
4041 *
4042 * This is called when unregistering network devices.
4043 *
4044 * Any protocol or device that holds a reference should register
4045 * for netdevice notification, and clean up and put back the
4046 * reference if they receive an UNREGISTER event.
4047 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004048 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049 */
4050static void netdev_wait_allrefs(struct net_device *dev)
4051{
4052 unsigned long rebroadcast_time, warning_time;
4053
4054 rebroadcast_time = warning_time = jiffies;
4055 while (atomic_read(&dev->refcnt) != 0) {
4056 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004057 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004058
4059 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004060 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061
4062 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4063 &dev->state)) {
4064 /* We must not have linkwatch events
4065 * pending on unregister. If this
4066 * happens, we simply run the queue
4067 * unscheduled, resulting in a noop
4068 * for this device.
4069 */
4070 linkwatch_run_queue();
4071 }
4072
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004073 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074
4075 rebroadcast_time = jiffies;
4076 }
4077
4078 msleep(250);
4079
4080 if (time_after(jiffies, warning_time + 10 * HZ)) {
4081 printk(KERN_EMERG "unregister_netdevice: "
4082 "waiting for %s to become free. Usage "
4083 "count = %d\n",
4084 dev->name, atomic_read(&dev->refcnt));
4085 warning_time = jiffies;
4086 }
4087 }
4088}
4089
4090/* The sequence is:
4091 *
4092 * rtnl_lock();
4093 * ...
4094 * register_netdevice(x1);
4095 * register_netdevice(x2);
4096 * ...
4097 * unregister_netdevice(y1);
4098 * unregister_netdevice(y2);
4099 * ...
4100 * rtnl_unlock();
4101 * free_netdev(y1);
4102 * free_netdev(y2);
4103 *
4104 * We are invoked by rtnl_unlock() after it drops the semaphore.
4105 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004106 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07004107 * without deadlocking with linkwatch via keventd.
4108 * 2) Since we run with the RTNL semaphore not held, we can sleep
4109 * safely in order to wait for the netdev refcnt to drop to zero.
4110 */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004111static DEFINE_MUTEX(net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112void netdev_run_todo(void)
4113{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004114 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004115
4116 /* Need to guard against multiple cpu's getting out of order. */
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004117 mutex_lock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004118
4119 /* Not safe to do outside the semaphore. We must not return
4120 * until all unregister events invoked by the local processor
4121 * have been completed (either by this todo run, or one on
4122 * another cpu).
4123 */
4124 if (list_empty(&net_todo_list))
4125 goto out;
4126
4127 /* Snapshot list, allow later requests */
4128 spin_lock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004129 list_replace_init(&net_todo_list, &list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130 spin_unlock(&net_todo_list_lock);
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004131
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132 while (!list_empty(&list)) {
4133 struct net_device *dev
4134 = list_entry(list.next, struct net_device, todo_list);
4135 list_del(&dev->todo_list);
4136
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004137 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004138 printk(KERN_ERR "network todo '%s' but state %d\n",
4139 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004140 dump_stack();
4141 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004143
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004144 dev->reg_state = NETREG_UNREGISTERED;
4145
4146 netdev_wait_allrefs(dev);
4147
4148 /* paranoia */
4149 BUG_ON(atomic_read(&dev->refcnt));
4150 BUG_TRAP(!dev->ip_ptr);
4151 BUG_TRAP(!dev->ip6_ptr);
4152 BUG_TRAP(!dev->dn_ptr);
4153
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004154 if (dev->destructor)
4155 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07004156
4157 /* Free network device */
4158 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159 }
4160
4161out:
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -08004162 mutex_unlock(&net_todo_run_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163}
4164
Rusty Russell5a1b5892007-04-28 21:04:03 -07004165static struct net_device_stats *internal_stats(struct net_device *dev)
Rusty Russellc45d2862007-03-28 14:29:08 -07004166{
Rusty Russell5a1b5892007-04-28 21:04:03 -07004167 return &dev->stats;
Rusty Russellc45d2862007-03-28 14:29:08 -07004168}
4169
David S. Millerdc2b4842008-07-08 17:18:23 -07004170static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07004171 struct netdev_queue *queue,
4172 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07004173{
4174 spin_lock_init(&queue->lock);
4175 queue->dev = dev;
4176}
4177
David S. Millerbb949fb2008-07-08 16:55:56 -07004178static void netdev_init_queues(struct net_device *dev)
4179{
David S. Millere8a04642008-07-17 00:34:19 -07004180 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4181 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerbb949fb2008-07-08 16:55:56 -07004182}
4183
Linus Torvalds1da177e2005-04-16 15:20:36 -07004184/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004185 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 * @sizeof_priv: size of private data to allocate space for
4187 * @name: device name format string
4188 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004189 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07004190 *
4191 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004192 * and performs basic initialization. Also allocates subqueue structs
4193 * for each queue on the device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004195struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4196 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197{
David S. Millere8a04642008-07-17 00:34:19 -07004198 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004199 struct net_device *dev;
4200 int alloc_size;
David S. Millere8a04642008-07-17 00:34:19 -07004201 void *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004203 BUG_ON(strlen(name) >= sizeof(dev->name));
4204
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004205 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07004206 if (sizeof_priv) {
4207 /* ensure 32-byte alignment of private area */
4208 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4209 alloc_size += sizeof_priv;
4210 }
4211 /* ensure 32-byte alignment of whole construct */
4212 alloc_size += NETDEV_ALIGN_CONST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07004214 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07004216 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217 return NULL;
4218 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219
David S. Millere8a04642008-07-17 00:34:19 -07004220 tx = kzalloc(sizeof(struct netdev_queue) * queue_count, GFP_KERNEL);
4221 if (!tx) {
4222 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4223 		       "tx queues.\n");
4224 kfree(p);
4225 return NULL;
4226 }
4227
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 dev = (struct net_device *)
4229 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4230 dev->padded = (char *)dev - (char *)p;
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004231 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232
David S. Millere8a04642008-07-17 00:34:19 -07004233 dev->_tx = tx;
4234 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004235 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07004236
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004237 if (sizeof_priv) {
4238 dev->priv = ((char *)dev +
David S. Millerfd2ea0a2008-07-17 01:56:23 -07004239 ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004240 & ~NETDEV_ALIGN_CONST));
4241 }
4242
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07004243 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244
David S. Millerbb949fb2008-07-08 16:55:56 -07004245 netdev_init_queues(dev);
4246
Rusty Russell5a1b5892007-04-28 21:04:03 -07004247 dev->get_stats = internal_stats;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07004248 netpoll_netdev_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249 setup(dev);
4250 strcpy(dev->name, name);
4251 return dev;
4252}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07004253EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254
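/*
 * Sketch: allocating a device with private data and several TX
 * subqueues, as a hypothetical multiqueue NIC driver might.  The
 * example_priv structure is illustrative only.
 */
struct example_priv {
	int dummy;		/* placeholder driver state */
};

static struct net_device *example_alloc_mq(void)
{
	return alloc_netdev_mq(sizeof(struct example_priv), "mq%d",
			       ether_setup, 4);	/* 4 TX queues */
}
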
4255/**
4256 * free_netdev - free network device
4257 * @dev: device
4258 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004259 * This function does the last stage of destroying an allocated device
4260 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004261 * If this is the last reference then it will be freed.
4262 */
4263void free_netdev(struct net_device *dev)
4264{
Denis V. Lunevf3005d72008-04-16 02:02:18 -07004265 release_net(dev_net(dev));
4266
David S. Millere8a04642008-07-17 00:34:19 -07004267 kfree(dev->_tx);
4268
Stephen Hemminger3041a062006-05-26 13:25:24 -07004269 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270 if (dev->reg_state == NETREG_UNINITIALIZED) {
4271 kfree((char *)dev - dev->padded);
4272 return;
4273 }
4274
4275 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4276 dev->reg_state = NETREG_RELEASED;
4277
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07004278 /* will free via device release */
4279 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004280}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004281
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282/* Synchronize with packet receive processing. */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004283void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284{
4285 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07004286 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287}
4288
4289/**
4290 * unregister_netdevice - remove device from the kernel
4291 * @dev: device
4292 *
4293 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004294 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295 *
4296 * Callers must hold the rtnl semaphore. You may want
4297 * unregister_netdev() instead of this.
4298 */
4299
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08004300void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301{
Herbert Xua6620712007-12-12 19:21:56 -08004302 ASSERT_RTNL();
4303
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004304 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305 /* Finish processing unregister after unlock */
4306 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307}
4308
4309/**
4310 * unregister_netdev - remove device from the kernel
4311 * @dev: device
4312 *
4313 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08004314 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315 *
4316 * This is just a wrapper for unregister_netdevice that takes
4317 * the rtnl semaphore. In general you want to use this and not
4318 * unregister_netdevice.
4319 */
4320void unregister_netdev(struct net_device *dev)
4321{
4322 rtnl_lock();
4323 unregister_netdevice(dev);
4324 rtnl_unlock();
4325}
4326
4327EXPORT_SYMBOL(unregister_netdev);
4328
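/*
 * Sketch: the matching teardown for the registration sequence above.
 * Unregister first (this sleeps, waiting out references via the todo
 * machinery), then free.  example_remove() is a made-up name.
 */
static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}
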
Eric W. Biedermance286d32007-09-12 13:53:49 +02004329/**
4330 * dev_change_net_namespace - move device to a different network namespace
4331 * @dev: device
4332 * @net: network namespace
4333 * @pat: If not NULL name pattern to try if the current device name
4334 * is already taken in the destination network namespace.
4335 *
4336 * This function shuts down a device interface and moves it
4337 * to a new network namespace. On success 0 is returned, on
4338 * a failure a negative errno code is returned.
4339 *
4340 * Callers must hold the rtnl semaphore.
4341 */
4342
4343int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4344{
4345 char buf[IFNAMSIZ];
4346 const char *destname;
4347 int err;
4348
4349 ASSERT_RTNL();
4350
4351 /* Don't allow namespace local devices to be moved. */
4352 err = -EINVAL;
4353 if (dev->features & NETIF_F_NETNS_LOCAL)
4354 goto out;
4355
4356 	/* Ensure the device has been registered */
4357 err = -EINVAL;
4358 if (dev->reg_state != NETREG_REGISTERED)
4359 goto out;
4360
4361 	/* Get out if there is nothing to do */
4362 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09004363 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02004364 goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	destname = dev->name;
	if (__dev_get_by_name(net, destname)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (!dev_valid_name(pat))
			goto out;
		if (strchr(pat, '%')) {
			if (__dev_alloc_name(net, pat, buf) < 0)
				goto out;
			destname = buf;
		} else
			destname = pat;
		if (__dev_get_by_name(net, destname))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice() and
	 * unregister_netdevice().
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy this device.
	 * They should clean up all of their state.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_addr_discard(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* Assign the new device name */
	if (destname != dev->name)
		strcpy(dev->name, destname);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	netdev_unregister_kobject(dev);
	err = netdev_register_kobject(dev);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	synchronize_net();
	err = 0;
out:
	return err;
}
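
/*
 * A minimal usage sketch (not part of this file): a hypothetical caller
 * moving a registered device into target_net.  Passing a pattern such
 * as "eth%d" lets __dev_alloc_name() pick a free name if dev->name is
 * already taken in the destination namespace:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	rtnl_unlock();
 *
 * default_device_exit() below does the same with a concrete "dev%d"
 * fallback name built from the device's ifindex.
 */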

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct Qdisc **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}
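
/*
 * Registration note: this callback is wired into CPU hotplug from
 * net_dev_init() below via
 *
 *	hotcpu_notifier(dev_cpu_callback, 0);
 *
 * so packets queued on a CPU that goes offline are drained onto the
 * CPU that handles the CPU_DEAD notification.
 */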

#ifdef CONFIG_NET_DMA
/**
 * net_dma_rebalance - try to maintain one DMA channel per CPU
 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
 *
 * This is called when the number of channels allocated to the net_dma client
 * changes.  The net_dma client tries to have one DMA channel per CPU.
 */

static void net_dma_rebalance(struct net_dma *net_dma)
{
	unsigned int cpu, i, n, chan_idx;
	struct dma_chan *chan;

	if (cpus_empty(net_dma->channel_mask)) {
		for_each_online_cpu(cpu)
			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
		return;
	}

	i = 0;
	cpu = first_cpu(cpu_online_map);

	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
		chan = net_dma->channels[chan_idx];

		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
		   + (i < (num_online_cpus() %
			cpus_weight(net_dma->channel_mask)) ? 1 : 0));

		while (n) {
			per_cpu(softnet_data, cpu).net_dma = chan;
			cpu = next_cpu(cpu, cpu_online_map);
			n--;
		}
		i++;
	}
}
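
/*
 * Worked example of the split above (figures assumed for illustration):
 * with 8 online CPUs and 3 channels, num_online_cpus() / cpus_weight()
 * is 2 with remainder 2, so the first two channels each serve three
 * CPUs and the third serves two (3 + 3 + 2 = 8).
 */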

/**
 * netdev_dma_event - event callback for the net_dma_client
 * @client: should always be net_dma_client
 * @chan: DMA channel for the event
 * @state: DMA state to be handled
 */
static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state)
{
	int i, found = 0, pos = -1;
	struct net_dma *net_dma =
		container_of(client, struct net_dma, client);
	enum dma_state_client ack = DMA_DUP; /* default: take no action */

	spin_lock(&net_dma->lock);
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		for (i = 0; i < nr_cpu_ids; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				break;
			} else if (net_dma->channels[i] == NULL && pos < 0)
				pos = i;

		if (!found && pos >= 0) {
			ack = DMA_ACK;
			net_dma->channels[pos] = chan;
			cpu_set(pos, net_dma->channel_mask);
			net_dma_rebalance(net_dma);
		}
		break;
	case DMA_RESOURCE_REMOVED:
		for (i = 0; i < nr_cpu_ids; i++)
			if (net_dma->channels[i] == chan) {
				found = 1;
				pos = i;
				break;
			}

		if (found) {
			ack = DMA_ACK;
			cpu_clear(pos, net_dma->channel_mask);
			net_dma->channels[pos] = NULL;
			net_dma_rebalance(net_dma);
		}
		break;
	default:
		break;
	}
	spin_unlock(&net_dma->lock);

	return ack;
}

/**
 * netdev_dma_register - register the networking subsystem as a DMA client
 */
static int __init netdev_dma_register(void)
{
	/* One channel slot per possible CPU id. */
	net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *),
				   GFP_KERNEL);
	if (unlikely(!net_dma.channels)) {
		printk(KERN_NOTICE
		       "netdev_dma: no memory for net_dma.channels\n");
		return -ENOMEM;
	}
	spin_lock_init(&net_dma.lock);
	dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
	dma_async_client_register(&net_dma.client);
	dma_async_client_chan_request(&net_dma.client);
	return 0;
}

#else
static int __init netdev_dma_register(void) { return -ENODEV; }
#endif /* CONFIG_NET_DMA */

/**
 *	netdev_compute_features - compute conjunction of two feature sets
 *	@all: first feature set
 *	@one: second feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Returns
 *	the new feature set.
 */
int netdev_compute_features(unsigned long all, unsigned long one)
{
	/* if device needs checksumming, downgrade to hw checksumming */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;

	/* if device can't do all checksum, downgrade to ipv4/ipv6 */
	if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
		all ^= NETIF_F_HW_CSUM
			| NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (one & NETIF_F_GSO)
		one |= NETIF_F_GSO_SOFTWARE;
	one |= NETIF_F_GSO;

	/* If even one device supports robust GSO, enable it for all. */
	if (one & NETIF_F_GSO_ROBUST)
		all |= NETIF_F_GSO_ROBUST;

	all &= one | NETIF_F_LLTX;

	if (!(all & NETIF_F_ALL_CSUM))
		all &= ~NETIF_F_SG;
	if (!(all & NETIF_F_SG))
		all &= ~NETIF_F_GSO_MASK;

	return all;
}
EXPORT_SYMBOL(netdev_compute_features);
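
/*
 * Worked example (flag names from linux/netdevice.h; the concrete sets
 * are assumed for illustration): enslaving a device with
 * one = NETIF_F_IP_CSUM | NETIF_F_SG under a master with
 * all = NETIF_F_HW_CSUM | NETIF_F_SG.  The slave lacks NETIF_F_HW_CSUM,
 * so the second test downgrades all to SG | IP_CSUM | IPV6_CSUM;
 * masking with (one | NETIF_F_LLTX) then yields
 * all = NETIF_F_SG | NETIF_F_IP_CSUM, and since IP_CSUM is part of
 * NETIF_F_ALL_CSUM, SG survives the final checks.
 */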

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
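
/*
 * Usage note: net_dev_init() below registers this table with
 * register_pernet_subsys(&netdev_net_ops), so netdev_init() runs for
 * each network namespace as it is created and netdev_exit() as it is
 * destroyed.  A minimal sketch of the same pattern for a hypothetical
 * subsystem:
 *
 *	static struct pernet_operations my_net_ops = {
 *		.init = my_net_init,	// per-namespace setup
 *		.exit = my_net_exit,	// per-namespace teardown
 *	};
 *
 *	err = register_pernet_subsys(&my_net_ops);
 */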

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *next;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, next) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
				__func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
	}

	netdev_dma_register();

	dev_boot_phase = 0;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif

EXPORT_PER_CPU_SYMBOL(softnet_data);