/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See register_netdevice() and unregister_netdevice(), which must be
 * called with the rtnl semaphore held, for example usages.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and the check of protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	This holds today; do not change it.
 *	Explanation: if a protocol handler that mangles packets were
 *	first on the list, it could not sense that the packet is cloned
 *	and should be copied-on-write; it would modify the clone, and
 *	subsequent readers would see a broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

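/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a module might register a tap with dev_add_pack().  The names
 * my_tap and my_tap_rcv are hypothetical; the handler owns the clone it
 * is handed and must free or consume it.  The pairing with
 * dev_remove_pack() on module exit is the expected lifecycle.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		(just drop our clone)
 *		return 0;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type	= htons(ETH_P_ALL),	(tap every protocol)
 *		.func	= my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);		(e.g. from module init)
 *	...
 *	dev_remove_pack(&my_tap);	(from module exit)
 */
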
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

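/*
 * Worked example (editor's addition, not in the original file): with the
 * parser above, a boot line such as
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * is split by get_options() into ints[] = {4, 5, 0x340, 0, 0}, with the
 * remainder "eth0" left in str.  The resulting entry therefore records
 * irq 5 and base_addr 0x340 for the device that will later probe as
 * eth0.  The specific values here are illustrative only.
 */
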
/*******************************************************************************

		      Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

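/*
 * Illustrative use (editor's sketch, not part of the original file):
 * the hold/put pairing that the kernel-doc above asks callers to follow.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		...use dev...
 *		dev_put(dev);	(drop the reference we were given)
 *	}
 */
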
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device, or NULL if it is not found. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count
 *	increased and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer
 *	to the first matching device, or NULL if none is found. The device
 *	returned has had a reference added and the pointer is safe until
 *	the user calls dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string, e.g. "lt%d", it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

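/*
 * Worked example (editor's addition, not in the original file): with
 * name "eth%d" and existing devices eth0 and eth2, the loop above sets
 * bits 0 and 2 in the inuse bitmap, find_first_zero_bit() returns 1,
 * snprintf() renders "eth1" into buf, the __dev_get_by_name() lookup
 * fails, and the function returns unit 1.
 */
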
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string, e.g. "lt%d", it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most
 *	platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}


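/*
 * Illustrative call (editor's sketch, not in the original file): a
 * driver asking for the next free ethN name before registering.
 *
 *	if (dev_alloc_name(dev, "eth%d") < 0)
 *		goto fail;	(bad format string or no free slot)
 *	(dev->name now holds e.g. "eth2")
 */
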
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device. A format string such as "eth%d" may
 *	be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	} else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges, this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on a different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}


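/*
 * Illustrative calling convention (editor's sketch, not part of the
 * original file): both dev_open() and dev_close() assert the RTNL lock,
 * so a caller outside the core brings an interface up and down like so:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	...
 *	dev_close(dev);
 *	rtnl_unlock();
 */
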
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow it a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

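/*
 * Illustrative sketch (editor's addition, not in the original file):
 * a minimal notifier as callers typically write one.  The names
 * my_netdev_event and my_nb are hypothetical; in this kernel the
 * void pointer passed to the callback is the struct net_device itself.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */
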
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

Herbert Xuf6a78bf2006-06-22 02:57:17 -07001343static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344{
1345 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001346
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001347#ifdef CONFIG_NET_CLS_ACT
1348 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1349 net_timestamp(skb);
1350#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001351 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001352#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353
1354 rcu_read_lock();
1355 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1356 /* Never send packets back to the socket
1357 * they originated from - MvS (miquels@drinkel.ow.org)
1358 */
1359 if ((ptype->dev == dev || !ptype->dev) &&
1360 (ptype->af_packet_priv == NULL ||
1361 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1362 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
1363 if (!skb2)
1364 break;
1365
1366 /* skb->nh should be correctly
1367 set by sender, so that the second statement is
1368 just protection against buggy protocols.
1369 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001370 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001372 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001373 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 if (net_ratelimit())
1375 printk(KERN_CRIT "protocol %04x is "
1376 "buggy, dev %s\n",
1377 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001378 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 }
1380
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001381 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001383 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 }
1385 }
1386 rcu_read_unlock();
1387}
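
/*
 * Sketch of the consumer side (illustrative; my_tap_rcv and my_tap are
 * hypothetical names): a tap lands on the ptype_all list walked above
 * by registering a packet_type with type ETH_P_ALL.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		(skb is a clone; consume it when done)
 *		return 0;
 *	}
 *
 *	static struct packet_type my_tap = {
 *		.type = __constant_htons(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 * After dev_add_pack(&my_tap), my_tap_rcv sees every outgoing frame via
 * the loop above, and incoming ones via netif_receive_skb().
 */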
1388
Denis Vlasenko56079432006-03-29 15:57:29 -08001389
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001390static inline void __netif_reschedule(struct Qdisc *q)
1391{
1392 struct softnet_data *sd;
1393 unsigned long flags;
1394
1395 local_irq_save(flags);
1396 sd = &__get_cpu_var(softnet_data);
1397 q->next_sched = sd->output_queue;
1398 sd->output_queue = q;
1399 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1400 local_irq_restore(flags);
1401}
1402
David S. Miller37437bb2008-07-16 02:15:04 -07001403void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001404{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001405 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1406 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001407}
1408EXPORT_SYMBOL(__netif_schedule);
1409
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001410void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001411{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001412 if (atomic_dec_and_test(&skb->users)) {
1413 struct softnet_data *sd;
1414 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001415
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001416 local_irq_save(flags);
1417 sd = &__get_cpu_var(softnet_data);
1418 skb->next = sd->completion_queue;
1419 sd->completion_queue = skb;
1420 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1421 local_irq_restore(flags);
1422 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001423}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001424EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001425
1426void dev_kfree_skb_any(struct sk_buff *skb)
1427{
1428 if (in_irq() || irqs_disabled())
1429 dev_kfree_skb_irq(skb);
1430 else
1431 dev_kfree_skb(skb);
1432}
1433EXPORT_SYMBOL(dev_kfree_skb_any);
1434
1435
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001436/**
1437 * netif_device_detach - mark device as removed
1438 * @dev: network device
1439 *
1440 * Mark device as removed from the system and therefore no longer available.
1441 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001442void netif_device_detach(struct net_device *dev)
1443{
1444 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1445 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001446 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001447 }
1448}
1449EXPORT_SYMBOL(netif_device_detach);
1450
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001451/**
1452 * netif_device_attach - mark device as attached
1453 * @dev: network device
1454 *
1455 * Mark device as attached to the system and restart it if needed.
1456 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001457void netif_device_attach(struct net_device *dev)
1458{
1459 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1460 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001461 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001462 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001463 }
1464}
1465EXPORT_SYMBOL(netif_device_attach);
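
/*
 * Typical usage sketch (illustrative; my_pci_suspend and my_pci_resume
 * are hypothetical driver callbacks): drivers pair detach/attach in
 * their power management hooks so the stack stops handing them packets
 * while the hardware is away.
 *
 *	static int my_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int my_pci_resume(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */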
1466
Ben Hutchings6de329e2008-06-16 17:02:28 -07001467static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1468{
1469 return ((features & NETIF_F_GEN_CSUM) ||
1470 ((features & NETIF_F_IP_CSUM) &&
1471 protocol == htons(ETH_P_IP)) ||
1472 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001473 protocol == htons(ETH_P_IPV6)) ||
1474 ((features & NETIF_F_FCOE_CRC) &&
1475 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001476}
1477
1478static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1479{
1480 if (can_checksum_protocol(dev->features, skb->protocol))
1481 return true;
1482
1483 if (skb->protocol == htons(ETH_P_8021Q)) {
1484 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1485 if (can_checksum_protocol(dev->features & dev->vlan_features,
1486 veh->h_vlan_encapsulated_proto))
1487 return true;
1488 }
1489
1490 return false;
1491}
Denis Vlasenko56079432006-03-29 15:57:29 -08001492
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493/*
1494 * Invalidate hardware checksum when packet is to be mangled, and
1495 * complete checksum manually on outgoing path.
1496 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001497int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498{
Al Virod3bc23e2006-11-14 21:24:49 -08001499 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001500 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501
Patrick McHardy84fa7932006-08-29 16:44:56 -07001502 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001503 goto out_set_summed;
1504
1505 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001506 /* Let GSO fix up the checksum. */
1507 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 }
1509
Herbert Xua0308472007-10-15 01:47:15 -07001510 offset = skb->csum_start - skb_headroom(skb);
1511 BUG_ON(offset >= skb_headlen(skb));
1512 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1513
1514 offset += skb->csum_offset;
1515 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1516
1517 if (skb_cloned(skb) &&
1518 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1520 if (ret)
1521 goto out;
1522 }
1523
Herbert Xua0308472007-10-15 01:47:15 -07001524 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001525out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001527out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 return ret;
1529}
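
/*
 * Worked example (assuming TCP over IPv4 on Ethernet): for a
 * CHECKSUM_PARTIAL skb, csum_start points at the TCP header and
 * csum_offset is 16, because that is where the checksum field sits
 * inside the TCP header.  The code above therefore checksums from the
 * TCP header to the end of the packet and folds the result into
 * skb->data + (csum_start - headroom) + 16.
 */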
1530
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001531/**
1532 * skb_gso_segment - Perform segmentation on skb.
1533 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001534 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001535 *
1536 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001537 *
1538 * It may return NULL if the skb requires no segmentation. This is
1539 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001540 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001541struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001542{
1543 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1544 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001545 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001546 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001547
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001548 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001549 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001550 __skb_pull(skb, skb->mac_len);
1551
Herbert Xu67fd1a72009-01-19 16:26:44 -08001552 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1553 struct net_device *dev = skb->dev;
1554 struct ethtool_drvinfo info = {};
1555
1556 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1557 dev->ethtool_ops->get_drvinfo(dev, &info);
1558
1559 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1560 "ip_summed=%d",
1561 info.driver, dev ? dev->features : 0L,
1562 skb->sk ? skb->sk->sk_route_caps : 0L,
1563 skb->len, skb->data_len, skb->ip_summed);
1564
Herbert Xua430a432006-07-08 13:34:56 -07001565 if (skb_header_cloned(skb) &&
1566 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1567 return ERR_PTR(err);
1568 }
1569
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001570 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001571 list_for_each_entry_rcu(ptype,
1572 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001573 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001574 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001575 err = ptype->gso_send_check(skb);
1576 segs = ERR_PTR(err);
1577 if (err || skb_gso_ok(skb, features))
1578 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001579 __skb_push(skb, (skb->data -
1580 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001581 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001582 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001583 break;
1584 }
1585 }
1586 rcu_read_unlock();
1587
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001588 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001589
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001590 return segs;
1591}
1592
1593EXPORT_SYMBOL(skb_gso_segment);
1594
Herbert Xufb286bb2005-11-10 13:01:24 -08001595/* Take action when hardware reception checksum errors are detected. */
1596#ifdef CONFIG_BUG
1597void netdev_rx_csum_fault(struct net_device *dev)
1598{
1599 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001600 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001601 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001602 dump_stack();
1603 }
1604}
1605EXPORT_SYMBOL(netdev_rx_csum_fault);
1606#endif
1607
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608/* Actually, we should eliminate this check as soon as we know that:
1609 * 1. An IOMMU is present and allows mapping all of memory.
1610 * 2. No high memory really exists on this machine.
1611 */
1612
1613static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1614{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001615#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 int i;
1617
1618 if (dev->features & NETIF_F_HIGHDMA)
1619 return 0;
1620
1621 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1622 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1623 return 1;
1624
Herbert Xu3d3a8532006-06-27 13:33:10 -07001625#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 return 0;
1627}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001629struct dev_gso_cb {
1630 void (*destructor)(struct sk_buff *skb);
1631};
1632
1633#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1634
1635static void dev_gso_skb_destructor(struct sk_buff *skb)
1636{
1637 struct dev_gso_cb *cb;
1638
1639 do {
1640 struct sk_buff *nskb = skb->next;
1641
1642 skb->next = nskb->next;
1643 nskb->next = NULL;
1644 kfree_skb(nskb);
1645 } while (skb->next);
1646
1647 cb = DEV_GSO_CB(skb);
1648 if (cb->destructor)
1649 cb->destructor(skb);
1650}
1651
1652/**
1653 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1654 * @skb: buffer to segment
1655 *
1656 * This function segments the given skb and stores the list of segments
1657 * in skb->next.
1658 */
1659static int dev_gso_segment(struct sk_buff *skb)
1660{
1661 struct net_device *dev = skb->dev;
1662 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001663 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1664 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001665
Herbert Xu576a30e2006-06-27 13:22:38 -07001666 segs = skb_gso_segment(skb, features);
1667
1668 /* Verifying header integrity only. */
1669 if (!segs)
1670 return 0;
1671
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001672 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001673 return PTR_ERR(segs);
1674
1675 skb->next = segs;
1676 DEV_GSO_CB(skb)->destructor = skb->destructor;
1677 skb->destructor = dev_gso_skb_destructor;
1678
1679 return 0;
1680}
1681
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001682int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1683 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001684{
Stephen Hemminger00829822008-11-20 20:14:53 -08001685 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001686 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001687
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001688 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001689 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001690 dev_queue_xmit_nit(skb, dev);
1691
Herbert Xu576a30e2006-06-27 13:22:38 -07001692 if (netif_needs_gso(dev, skb)) {
1693 if (unlikely(dev_gso_segment(skb)))
1694 goto out_kfree_skb;
1695 if (skb->next)
1696 goto gso;
1697 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001698
Eric Dumazet93f154b2009-05-18 22:19:19 -07001699 /*
1700 * If the device doesn't need skb->dst, release it right now while
1701 * it's still hot in this CPU's cache.
1702 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001703 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1704 skb_dst_drop(skb);
1705
Patrick Ohlyac45f602009-02-12 05:03:37 +00001706 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001707 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001708 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001709 /*
1710 * TODO: if skb_orphan() was called by
1711 * dev->hard_start_xmit() (for example, the unmodified
1712 * igb driver does that; bnx2 doesn't), then
1713 * skb_tx_software_timestamp() will be unable to send
1714 * back the time stamp.
1715 *
1716 * How can this be prevented? Always create another
1717 * reference to the socket before calling
1718 * dev->hard_start_xmit()? Or prevent skb_orphan() from
1719 * doing anything in dev->hard_start_xmit() by clearing
1720 * the skb destructor before the call and restoring it
1721 * afterwards, then doing the skb_orphan() ourselves?
1722 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001723 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001724 }
1725
Herbert Xu576a30e2006-06-27 13:22:38 -07001726gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001727 do {
1728 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001729
1730 skb->next = nskb->next;
1731 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001732 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001733 if (unlikely(rc != NETDEV_TX_OK)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001734 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001735 skb->next = nskb;
1736 return rc;
1737 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001738 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001739 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001740 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001741 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001742
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001743 skb->destructor = DEV_GSO_CB(skb)->destructor;
1744
1745out_kfree_skb:
1746 kfree_skb(skb);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001747 return NETDEV_TX_OK;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001748}
1749
David S. Miller70192982009-01-27 16:34:47 -08001750static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001751
Stephen Hemminger92477442009-03-21 13:39:26 -07001752u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001753{
David S. Miller70192982009-01-27 16:34:47 -08001754 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001755
David S. Miller513de112009-05-03 14:43:10 -07001756 if (skb_rx_queue_recorded(skb)) {
1757 hash = skb_get_rx_queue(skb);
1758 while (unlikely(hash >= dev->real_num_tx_queues))
1759 hash -= dev->real_num_tx_queues;
1760 return hash;
1761 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001762
1763 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001764 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001765 else
David S. Miller70192982009-01-27 16:34:47 -08001766 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001767
David S. Miller70192982009-01-27 16:34:47 -08001768 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001769
David S. Millerb6b2fed2008-07-21 09:48:06 -07001770 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001771}
Stephen Hemminger92477442009-03-21 13:39:26 -07001772EXPORT_SYMBOL(skb_tx_hash);
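
/*
 * Worked example of the mapping above: with real_num_tx_queues == 4
 * and a jhash result of 0x80000000, ((u64)0x80000000 * 4) >> 32 == 2,
 * so the packet is steered to queue 2.  The multiply-and-shift spreads
 * hashes uniformly over [0, real_num_tx_queues) without a modulo.
 */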
David S. Miller8f0f2222008-07-15 03:47:03 -07001773
David S. Millere8a04642008-07-17 00:34:19 -07001774static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1775 struct sk_buff *skb)
1776{
Stephen Hemminger00829822008-11-20 20:14:53 -08001777 const struct net_device_ops *ops = dev->netdev_ops;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001778 u16 queue_index = 0;
1779
Stephen Hemminger00829822008-11-20 20:14:53 -08001780 if (ops->ndo_select_queue)
1781 queue_index = ops->ndo_select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001782 else if (dev->real_num_tx_queues > 1)
David S. Miller70192982009-01-27 16:34:47 -08001783 queue_index = skb_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001784
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001785 skb_set_queue_mapping(skb, queue_index);
1786 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001787}
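
/*
 * Sketch of a driver override (illustrative; my_select_queue is a
 * hypothetical name): a multiqueue driver can steer traffic itself by
 * supplying ndo_select_queue, e.g. to pin high-priority frames to a
 * dedicated ring, falling back to the default hash otherwise.
 *
 *	static u16 my_select_queue(struct net_device *dev,
 *				   struct sk_buff *skb)
 *	{
 *		if (skb->priority == TC_PRIO_CONTROL)
 *			return dev->real_num_tx_queues - 1;
 *		return skb_tx_hash(dev, skb);
 *	}
 */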
1788
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001789static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1790 struct net_device *dev,
1791 struct netdev_queue *txq)
1792{
1793 spinlock_t *root_lock = qdisc_lock(q);
1794 int rc;
1795
1796 spin_lock(root_lock);
1797 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1798 kfree_skb(skb);
1799 rc = NET_XMIT_DROP;
1800 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1801 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1802 /*
1803 * This is a work-conserving queue; there are no old skbs
1804 * waiting to be sent out; and the qdisc is not running -
1805 * xmit the skb directly.
1806 */
1807 __qdisc_update_bstats(q, skb->len);
1808 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1809 __qdisc_run(q);
1810 else
1811 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1812
1813 rc = NET_XMIT_SUCCESS;
1814 } else {
1815 rc = qdisc_enqueue_root(skb, q);
1816 qdisc_run(q);
1817 }
1818 spin_unlock(root_lock);
1819
1820 return rc;
1821}
1822
Dave Jonesd29f7492008-07-22 14:09:06 -07001823/**
1824 * dev_queue_xmit - transmit a buffer
1825 * @skb: buffer to transmit
1826 *
1827 * Queue a buffer for transmission to a network device. The caller must
1828 * have set the device and priority and built the buffer before calling
1829 * this function. The function can be called from an interrupt.
1830 *
1831 * A negative errno code is returned on a failure. A success does not
1832 * guarantee the frame will be transmitted as it may be dropped due
1833 * to congestion or traffic shaping.
1834 *
1835 * -----------------------------------------------------------------------------------
1836 * I notice this method can also return errors from the queue disciplines,
1837 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1838 * be positive.
1839 *
1840 * Regardless of the return value, the skb is consumed, so it is currently
1841 * difficult to retry a send to this method. (You can bump the ref count
1842 * before sending to hold a reference for retry if you are careful.)
1843 *
1844 * When calling this method, interrupts MUST be enabled. This is because
1845 * the BH enable code must have IRQs enabled so that it will not deadlock.
1846 * --BLG
1847 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848int dev_queue_xmit(struct sk_buff *skb)
1849{
1850 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001851 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 struct Qdisc *q;
1853 int rc = -ENOMEM;
1854
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001855 /* GSO will handle the following emulations directly. */
1856 if (netif_needs_gso(dev, skb))
1857 goto gso;
1858
David S. Miller4cf704f2009-06-09 00:18:51 -07001859 if (skb_has_frags(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001861 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 goto out_kfree_skb;
1863
1864 /* Fragmented skb is linearized if device does not support SG,
1865 * or if at least one of fragments is in highmem and device
1866 * does not support DMA from it.
1867 */
1868 if (skb_shinfo(skb)->nr_frags &&
1869 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001870 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 goto out_kfree_skb;
1872
1873 /* If packet is not checksummed and device does not support
1874 * checksumming for this protocol, complete checksumming here.
1875 */
Herbert Xu663ead32007-04-09 11:59:07 -07001876 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1877 skb_set_transport_header(skb, skb->csum_start -
1878 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001879 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1880 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001881 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001883gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001884 /* Disable soft irqs for various locks below. Also
1885 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001887 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888
David S. Millereae792b2008-07-15 03:03:33 -07001889 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001890 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001891
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892#ifdef CONFIG_NET_CLS_ACT
1893 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1894#endif
1895 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001896 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07001897 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 }
1899
1900 /* The device has no queue. Common case for software devices:
1901 loopback, all sorts of tunnels...
1902
Herbert Xu932ff272006-06-09 12:20:56 -07001903 Really, it is unlikely that netif_tx_lock protection is necessary
1904 here. (E.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 counters.)
1906 However, it is possible that they rely on the protection
1907 we provide here.
1908
1909 Check this and take the lock: it is not prone to deadlocks.
1910 Alternatively, shoot the noqueue qdisc; that is even simpler 8)
1911 */
1912 if (dev->flags & IFF_UP) {
1913 int cpu = smp_processor_id(); /* ok because BHs are off */
1914
David S. Millerc773e842008-07-08 23:13:53 -07001915 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916
David S. Millerc773e842008-07-08 23:13:53 -07001917 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001919 if (!netif_tx_queue_stopped(txq)) {
Krishna Kumar03a9a442009-08-29 20:21:36 +00001920 rc = NET_XMIT_SUCCESS;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001921 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001922 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 goto out;
1924 }
1925 }
David S. Millerc773e842008-07-08 23:13:53 -07001926 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 if (net_ratelimit())
1928 printk(KERN_CRIT "Virtual device %s asks to "
1929 "queue packet!\n", dev->name);
1930 } else {
1931 /* Recursion detected! It is possible,
1932 * unfortunately. */
1933 if (net_ratelimit())
1934 printk(KERN_CRIT "Dead loop on virtual device "
1935 "%s, fix it urgently!\n", dev->name);
1936 }
1937 }
1938
1939 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001940 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941
1942out_kfree_skb:
1943 kfree_skb(skb);
1944 return rc;
1945out:
Herbert Xud4828d82006-06-22 02:28:18 -07001946 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 return rc;
1948}
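
/*
 * Minimal caller sketch (illustrative; dev and skb are assumed to be
 * already set up): a sender building a raw frame by hand sets the
 * device and priority first, exactly as the comment above requires.
 *
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	rc = dev_queue_xmit(skb);
 *
 * There must be no kfree_skb(skb) afterwards, even when rc indicates
 * failure: ownership of the skb passed to dev_queue_xmit() regardless
 * of the return value.
 */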
1949
1950
1951/*=======================================================================
1952 Receiver routines
1953 =======================================================================*/
1954
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001955int netdev_max_backlog __read_mostly = 1000;
1956int netdev_budget __read_mostly = 300;
1957int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958
1959DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1960
1961
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962/**
1963 * netif_rx - post buffer to the network code
1964 * @skb: buffer to post
1965 *
1966 * This function receives a packet from a device driver and queues it for
1967 * the upper (protocol) levels to process. It always succeeds. The buffer
1968 * may be dropped during processing for congestion control or by the
1969 * protocol layers.
1970 *
1971 * return values:
1972 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 * NET_RX_DROP (packet was dropped)
1974 *
1975 */
1976
1977int netif_rx(struct sk_buff *skb)
1978{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 struct softnet_data *queue;
1980 unsigned long flags;
1981
1982 /* if netpoll wants it, pretend we never saw it */
1983 if (netpoll_rx(skb))
1984 return NET_RX_DROP;
1985
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001986 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001987 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
1989 /*
1990 * The code is arranged so that the path is shortest
1991 * when the CPU is congested but still operating.
1992 */
1993 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 queue = &__get_cpu_var(softnet_data);
1995
1996 __get_cpu_var(netdev_rx_stat).total++;
1997 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1998 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07002002 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 }
2004
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002005 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 goto enqueue;
2007 }
2008
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009 __get_cpu_var(netdev_rx_stat).dropped++;
2010 local_irq_restore(flags);
2011
2012 kfree_skb(skb);
2013 return NET_RX_DROP;
2014}
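
/*
 * Usage sketch (illustrative; my_hw_read_frame and priv are
 * hypothetical): a classic non-NAPI driver calls this from its
 * interrupt handler for each received frame, after setting the
 * protocol field.
 *
 *	skb = my_hw_read_frame(priv);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * eth_type_trans() also sets skb->dev and skb->pkt_type, so nothing
 * else is needed before handing the buffer over.
 */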
2015
2016int netif_rx_ni(struct sk_buff *skb)
2017{
2018 int err;
2019
2020 preempt_disable();
2021 err = netif_rx(skb);
2022 if (local_softirq_pending())
2023 do_softirq();
2024 preempt_enable();
2025
2026 return err;
2027}
2028
2029EXPORT_SYMBOL(netif_rx_ni);
2030
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031static void net_tx_action(struct softirq_action *h)
2032{
2033 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2034
2035 if (sd->completion_queue) {
2036 struct sk_buff *clist;
2037
2038 local_irq_disable();
2039 clist = sd->completion_queue;
2040 sd->completion_queue = NULL;
2041 local_irq_enable();
2042
2043 while (clist) {
2044 struct sk_buff *skb = clist;
2045 clist = clist->next;
2046
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002047 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 __kfree_skb(skb);
2049 }
2050 }
2051
2052 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002053 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054
2055 local_irq_disable();
2056 head = sd->output_queue;
2057 sd->output_queue = NULL;
2058 local_irq_enable();
2059
2060 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002061 struct Qdisc *q = head;
2062 spinlock_t *root_lock;
2063
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 head = head->next_sched;
2065
David S. Miller5fb66222008-08-02 20:02:43 -07002066 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002067 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002068 smp_mb__before_clear_bit();
2069 clear_bit(__QDISC_STATE_SCHED,
2070 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002071 qdisc_run(q);
2072 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002074 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002075 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002076 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002077 } else {
2078 smp_mb__before_clear_bit();
2079 clear_bit(__QDISC_STATE_SCHED,
2080 &q->state);
2081 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 }
2083 }
2084 }
2085}
2086
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002087static inline int deliver_skb(struct sk_buff *skb,
2088 struct packet_type *pt_prev,
2089 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090{
2091 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002092 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093}
2094
2095#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002096
2097#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2098/* This hook is defined here for ATM LANE */
2099int (*br_fdb_test_addr_hook)(struct net_device *dev,
2100 unsigned char *addr) __read_mostly;
2101EXPORT_SYMBOL(br_fdb_test_addr_hook);
2102#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
Stephen Hemminger6229e362007-03-21 13:38:47 -07002104/*
2105 * If bridge module is loaded call bridging hook.
2106 * returns NULL if packet was consumed.
2107 */
2108struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2109 struct sk_buff *skb) __read_mostly;
Michał Mirosławda678292009-06-05 05:35:28 +00002110EXPORT_SYMBOL(br_handle_frame_hook);
2111
Stephen Hemminger6229e362007-03-21 13:38:47 -07002112static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2113 struct packet_type **pt_prev, int *ret,
2114 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115{
2116 struct net_bridge_port *port;
2117
Stephen Hemminger6229e362007-03-21 13:38:47 -07002118 if (skb->pkt_type == PACKET_LOOPBACK ||
2119 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2120 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121
2122 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002123 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002125 }
2126
Stephen Hemminger6229e362007-03-21 13:38:47 -07002127 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128}
2129#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002130#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131#endif
2132
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002133#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2134struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2135EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2136
2137static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2138 struct packet_type **pt_prev,
2139 int *ret,
2140 struct net_device *orig_dev)
2141{
2142 if (skb->dev->macvlan_port == NULL)
2143 return skb;
2144
2145 if (*pt_prev) {
2146 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2147 *pt_prev = NULL;
2148 }
2149 return macvlan_handle_frame_hook(skb);
2150}
2151#else
2152#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2153#endif
2154
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155#ifdef CONFIG_NET_CLS_ACT
2156/* TODO: Maybe we should just force sch_ingress to be compiled in
2157 * whenever CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
2158 * extra instructions (a compare and two stores) when it is not loaded
2159 * but CONFIG_NET_CLS_ACT is enabled.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002160 * NOTE: This doesn't stop any functionality; if you don't have
2161 * the ingress scheduler, you just can't add policies on ingress.
2162 *
2163 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002164static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002167 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002168 struct netdev_queue *rxq;
2169 int result = TC_ACT_OK;
2170 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002171
Herbert Xuf697c3e2007-10-14 00:38:47 -07002172 if (MAX_RED_LOOP < ttl++) {
2173 printk(KERN_WARNING
2174 "Redir loop detected Dropping packet (%d->%d)\n",
2175 skb->iif, dev->ifindex);
2176 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177 }
2178
Herbert Xuf697c3e2007-10-14 00:38:47 -07002179 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2180 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2181
David S. Miller555353c2008-07-08 17:33:13 -07002182 rxq = &dev->rx_queue;
2183
David S. Miller83874002008-07-17 00:53:03 -07002184 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002185 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002186 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002187 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2188 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002189 spin_unlock(qdisc_lock(q));
2190 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002191
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 return result;
2193}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002194
2195static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2196 struct packet_type **pt_prev,
2197 int *ret, struct net_device *orig_dev)
2198{
David S. Miller8d50b532008-07-30 02:37:46 -07002199 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002200 goto out;
2201
2202 if (*pt_prev) {
2203 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2204 *pt_prev = NULL;
2205 } else {
2206 /* Huh? Why does turning on AF_PACKET affect this? */
2207 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2208 }
2209
2210 switch (ing_filter(skb)) {
2211 case TC_ACT_SHOT:
2212 case TC_ACT_STOLEN:
2213 kfree_skb(skb);
2214 return NULL;
2215 }
2216
2217out:
2218 skb->tc_verd = 0;
2219 return skb;
2220}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221#endif
2222
Patrick McHardybc1d0412008-07-14 22:49:30 -07002223/*
2224 * netif_nit_deliver - deliver received packets to network taps
2225 * @skb: buffer
2226 *
2227 * This function is used to deliver incoming packets to network
2228 * taps. It should be used when the normal netif_receive_skb path
2229 * is bypassed, for example because of VLAN acceleration.
2230 */
2231void netif_nit_deliver(struct sk_buff *skb)
2232{
2233 struct packet_type *ptype;
2234
2235 if (list_empty(&ptype_all))
2236 return;
2237
2238 skb_reset_network_header(skb);
2239 skb_reset_transport_header(skb);
2240 skb->mac_len = skb->network_header - skb->mac_header;
2241
2242 rcu_read_lock();
2243 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2244 if (!ptype->dev || ptype->dev == skb->dev)
2245 deliver_skb(skb, ptype, skb->dev);
2246 }
2247 rcu_read_unlock();
2248}
2249
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002250/**
2251 * netif_receive_skb - process receive buffer from network
2252 * @skb: buffer to process
2253 *
2254 * netif_receive_skb() is the main receive data processing function.
2255 * It always succeeds. The buffer may be dropped during processing
2256 * for congestion control or by the protocol layers.
2257 *
2258 * This function may only be called from softirq context and interrupts
2259 * should be enabled.
2260 *
2261 * Return values (usually ignored):
2262 * NET_RX_SUCCESS: no congestion
2263 * NET_RX_DROP: packet was dropped
2264 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265int netif_receive_skb(struct sk_buff *skb)
2266{
2267 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002268 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002269 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002271 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002273 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2274 return NET_RX_SUCCESS;
2275
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002277 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 return NET_RX_DROP;
2279
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002280 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002281 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282
Patrick McHardyc01003c2007-03-29 11:46:52 -07002283 if (!skb->iif)
2284 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002285
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002286 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002287 orig_dev = skb->dev;
2288 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002289 if (skb_bond_should_drop(skb))
2290 null_or_orig = orig_dev; /* deliver only exact match */
2291 else
2292 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002293 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002294
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 __get_cpu_var(netdev_rx_stat).total++;
2296
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002297 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002298 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002299 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300
2301 pt_prev = NULL;
2302
2303 rcu_read_lock();
2304
2305#ifdef CONFIG_NET_CLS_ACT
2306 if (skb->tc_verd & TC_NCLS) {
2307 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2308 goto ncls;
2309 }
2310#endif
2311
2312 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002313 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2314 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002315 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002316 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 pt_prev = ptype;
2318 }
2319 }
2320
2321#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002322 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2323 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325ncls:
2326#endif
2327
Stephen Hemminger6229e362007-03-21 13:38:47 -07002328 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2329 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002330 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002331 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2332 if (!skb)
2333 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334
2335 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002336 list_for_each_entry_rcu(ptype,
2337 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002339 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2340 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002341 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002342 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 pt_prev = ptype;
2344 }
2345 }
2346
2347 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002348 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 } else {
2350 kfree_skb(skb);
2351 /* Jamal, now you will not be able to escape explaining
2352 * to me how you were going to use this. :-)
2353 */
2354 ret = NET_RX_DROP;
2355 }
2356
2357out:
2358 rcu_read_unlock();
2359 return ret;
2360}
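
/*
 * Usage sketch (illustrative): NAPI drivers call this from their
 * ->poll() callback in softirq context, one skb per received frame:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_receive_skb(skb);
 *
 * Drivers that want receive offload use napi_gro_receive() below
 * instead, which falls back to netif_receive_skb() for traffic GRO
 * cannot merge.
 */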
2361
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002362/* Network device is going away, flush any packets still pending */
2363static void flush_backlog(void *arg)
2364{
2365 struct net_device *dev = arg;
2366 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2367 struct sk_buff *skb, *tmp;
2368
2369 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2370 if (skb->dev == dev) {
2371 __skb_unlink(skb, &queue->input_pkt_queue);
2372 kfree_skb(skb);
2373 }
2374}
2375
Herbert Xud565b0a2008-12-15 23:38:52 -08002376static int napi_gro_complete(struct sk_buff *skb)
2377{
2378 struct packet_type *ptype;
2379 __be16 type = skb->protocol;
2380 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2381 int err = -ENOENT;
2382
Herbert Xufc59f9a2009-04-14 15:11:06 -07002383 if (NAPI_GRO_CB(skb)->count == 1) {
2384 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002385 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002386 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002387
2388 rcu_read_lock();
2389 list_for_each_entry_rcu(ptype, head, list) {
2390 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2391 continue;
2392
2393 err = ptype->gro_complete(skb);
2394 break;
2395 }
2396 rcu_read_unlock();
2397
2398 if (err) {
2399 WARN_ON(&ptype->list == head);
2400 kfree_skb(skb);
2401 return NET_RX_SUCCESS;
2402 }
2403
2404out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002405 return netif_receive_skb(skb);
2406}
2407
2408void napi_gro_flush(struct napi_struct *napi)
2409{
2410 struct sk_buff *skb, *next;
2411
2412 for (skb = napi->gro_list; skb; skb = next) {
2413 next = skb->next;
2414 skb->next = NULL;
2415 napi_gro_complete(skb);
2416 }
2417
Herbert Xu4ae55442009-02-08 18:00:36 +00002418 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002419 napi->gro_list = NULL;
2420}
2421EXPORT_SYMBOL(napi_gro_flush);
2422
Herbert Xu96e93ea2009-01-06 10:49:34 -08002423int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002424{
2425 struct sk_buff **pp = NULL;
2426 struct packet_type *ptype;
2427 __be16 type = skb->protocol;
2428 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002429 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002430 int mac_len;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002431 int ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002432
2433 if (!(skb->dev->features & NETIF_F_GRO))
2434 goto normal;
2435
David S. Miller4cf704f2009-06-09 00:18:51 -07002436 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002437 goto normal;
2438
Herbert Xud565b0a2008-12-15 23:38:52 -08002439 rcu_read_lock();
2440 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002441 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2442 continue;
2443
Herbert Xu86911732009-01-29 14:19:50 +00002444 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002445 mac_len = skb->network_header - skb->mac_header;
2446 skb->mac_len = mac_len;
2447 NAPI_GRO_CB(skb)->same_flow = 0;
2448 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002449 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002450
Herbert Xud565b0a2008-12-15 23:38:52 -08002451 pp = ptype->gro_receive(&napi->gro_list, skb);
2452 break;
2453 }
2454 rcu_read_unlock();
2455
2456 if (&ptype->list == head)
2457 goto normal;
2458
Herbert Xu0da2afd52008-12-26 14:57:42 -08002459 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002460 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002461
Herbert Xud565b0a2008-12-15 23:38:52 -08002462 if (pp) {
2463 struct sk_buff *nskb = *pp;
2464
2465 *pp = nskb->next;
2466 nskb->next = NULL;
2467 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002468 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002469 }
2470
Herbert Xu0da2afd52008-12-26 14:57:42 -08002471 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002472 goto ok;
2473
Herbert Xu4ae55442009-02-08 18:00:36 +00002474 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002475 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002476
Herbert Xu4ae55442009-02-08 18:00:36 +00002477 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002478 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002479 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002480 skb->next = napi->gro_list;
2481 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002482 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002483
Herbert Xuad0f9902009-02-01 01:24:55 -08002484pull:
Herbert Xucb189782009-05-26 18:50:31 +00002485 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2486 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2487
2488 BUG_ON(skb->end - skb->tail < grow);
2489
2490 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2491
2492 skb->tail += grow;
2493 skb->data_len -= grow;
2494
2495 skb_shinfo(skb)->frags[0].page_offset += grow;
2496 skb_shinfo(skb)->frags[0].size -= grow;
2497
2498 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2499 put_page(skb_shinfo(skb)->frags[0].page);
2500 memmove(skb_shinfo(skb)->frags,
2501 skb_shinfo(skb)->frags + 1,
2502 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2503 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002504 }
2505
Herbert Xud565b0a2008-12-15 23:38:52 -08002506ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002507 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002508
2509normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002510 ret = GRO_NORMAL;
2511 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002512}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002513EXPORT_SYMBOL(dev_gro_receive);
2514
2515static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2516{
2517 struct sk_buff *p;
2518
Herbert Xud1c76af2009-03-16 10:50:02 -07002519 if (netpoll_rx_on(skb))
2520 return GRO_NORMAL;
2521
Herbert Xu96e93ea2009-01-06 10:49:34 -08002522 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002523 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2524 && !compare_ether_header(skb_mac_header(p),
2525 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002526 NAPI_GRO_CB(p)->flush = 0;
2527 }
2528
2529 return dev_gro_receive(napi, skb);
2530}
Herbert Xu5d38a072009-01-04 16:13:40 -08002531
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002532int napi_skb_finish(int ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002533{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002534 int err = NET_RX_SUCCESS;
2535
2536 switch (ret) {
2537 case GRO_NORMAL:
Herbert Xu5d38a072009-01-04 16:13:40 -08002538 return netif_receive_skb(skb);
2539
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002540 case GRO_DROP:
2541 err = NET_RX_DROP;
2542 /* fall through */
2543
2544 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002545 kfree_skb(skb);
2546 break;
2547 }
2548
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002549 return err;
2550}
2551EXPORT_SYMBOL(napi_skb_finish);
2552
Herbert Xu78a478d2009-05-26 18:50:21 +00002553void skb_gro_reset_offset(struct sk_buff *skb)
2554{
2555 NAPI_GRO_CB(skb)->data_offset = 0;
2556 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002557 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002558
Herbert Xu78d3fd02009-05-26 18:50:23 +00002559 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002560 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002561 NAPI_GRO_CB(skb)->frag0 =
2562 page_address(skb_shinfo(skb)->frags[0].page) +
2563 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002564 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2565 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002566}
2567EXPORT_SYMBOL(skb_gro_reset_offset);
2568
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002569int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2570{
Herbert Xu86911732009-01-29 14:19:50 +00002571 skb_gro_reset_offset(skb);
2572
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002573 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002574}
2575EXPORT_SYMBOL(napi_gro_receive);
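
/*
 * Usage sketch (illustrative; priv is a hypothetical driver-private
 * structure embedding the napi_struct): in a GRO-aware driver's
 * ->poll(), the call above simply replaces netif_receive_skb():
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(&priv->napi, skb);
 *
 * Merged packets are flushed up the stack at napi_complete() time via
 * napi_gro_flush().
 */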
2576
Herbert Xu96e93ea2009-01-06 10:49:34 -08002577void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2578{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002579 __skb_pull(skb, skb_headlen(skb));
2580 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2581
2582 napi->skb = skb;
2583}
2584EXPORT_SYMBOL(napi_reuse_skb);
2585
Herbert Xu76620aa2009-04-16 02:02:07 -07002586struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002587{
2588 struct net_device *dev = napi->dev;
2589 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002590
2591 if (!skb) {
2592 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2593 if (!skb)
2594 goto out;
2595
2596 skb_reserve(skb, NET_IP_ALIGN);
Herbert Xu76620aa2009-04-16 02:02:07 -07002597
2598 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002599 }
2600
Herbert Xu96e93ea2009-01-06 10:49:34 -08002601out:
2602 return skb;
2603}
Herbert Xu76620aa2009-04-16 02:02:07 -07002604EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002605
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002606int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2607{
2608 int err = NET_RX_SUCCESS;
2609
2610 switch (ret) {
2611 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002612 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002613 skb->protocol = eth_type_trans(skb, napi->dev);
2614
2615 if (ret == GRO_NORMAL)
2616 return netif_receive_skb(skb);
2617
2618 skb_gro_pull(skb, -ETH_HLEN);
2619 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002620
2621 case GRO_DROP:
2622 err = NET_RX_DROP;
2623 /* fall through */
2624
2625 case GRO_MERGED_FREE:
2626 napi_reuse_skb(napi, skb);
2627 break;
2628 }
2629
2630 return err;
2631}
2632EXPORT_SYMBOL(napi_frags_finish);
2633
Herbert Xu76620aa2009-04-16 02:02:07 -07002634struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002635{
Herbert Xu76620aa2009-04-16 02:02:07 -07002636 struct sk_buff *skb = napi->skb;
2637 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002638 unsigned int hlen;
2639 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002640
2641 napi->skb = NULL;
2642
2643 skb_reset_mac_header(skb);
2644 skb_gro_reset_offset(skb);
2645
Herbert Xua5b1cf22009-05-26 18:50:28 +00002646 off = skb_gro_offset(skb);
2647 hlen = off + sizeof(*eth);
2648 eth = skb_gro_header_fast(skb, off);
2649 if (skb_gro_header_hard(skb, hlen)) {
2650 eth = skb_gro_header_slow(skb, hlen, off);
2651 if (unlikely(!eth)) {
2652 napi_reuse_skb(napi, skb);
2653 skb = NULL;
2654 goto out;
2655 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002656 }
2657
2658 skb_gro_pull(skb, sizeof(*eth));
2659
2660 /*
2661 * This works because the only protocols we care about don't require
2662 * special handling. We'll fix it up properly at the end.
2663 */
2664 skb->protocol = eth->h_proto;
2665
2666out:
2667 return skb;
2668}
2669EXPORT_SYMBOL(napi_frags_skb);
2670
2671int napi_gro_frags(struct napi_struct *napi)
2672{
2673 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002674
2675 if (!skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002676 return NET_RX_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002677
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002678 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002679}
2680EXPORT_SYMBOL(napi_gro_frags);
2681
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002682static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683{
2684 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2686 unsigned long start_time = jiffies;
2687
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002688 napi->weight = weight_p;
2689 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691
2692 local_irq_disable();
2693 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002694 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002695 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002696 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002697 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002698 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699 local_irq_enable();
2700
Herbert Xu8f1ead22009-03-26 00:59:10 -07002701 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002702 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002704 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705}
2706
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002707/**
2708 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002709 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002710 *
2711 * The entry's receive function will be scheduled to run
2712 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002713void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002714{
2715 unsigned long flags;
2716
2717 local_irq_save(flags);
2718 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2719 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2720 local_irq_restore(flags);
2721}
2722EXPORT_SYMBOL(__napi_schedule);
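
/*
 * Usage sketch (illustrative; my_irq_handler, my_disable_rx_irq and
 * priv are hypothetical): device interrupt handlers do not call
 * __napi_schedule() directly but go through napi_schedule(), which
 * tests NAPI_STATE_SCHED first so each context is only queued once.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */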
2723
Herbert Xud565b0a2008-12-15 23:38:52 -08002724void __napi_complete(struct napi_struct *n)
2725{
2726 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2727 BUG_ON(n->gro_list);
2728
2729 list_del(&n->poll_list);
2730 smp_mb__before_clear_bit();
2731 clear_bit(NAPI_STATE_SCHED, &n->state);
2732}
2733EXPORT_SYMBOL(__napi_complete);
2734
2735void napi_complete(struct napi_struct *n)
2736{
2737 unsigned long flags;
2738
2739 /*
2740 * Don't let NAPI dequeue from the CPU poll list
2741 * just in case it's running on a different CPU.
2742 */
2743 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2744 return;
2745
2746 napi_gro_flush(n);
2747 local_irq_save(flags);
2748 __napi_complete(n);
2749 local_irq_restore(flags);
2750}
2751EXPORT_SYMBOL(napi_complete);
2752
2753void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2754 int (*poll)(struct napi_struct *, int), int weight)
2755{
2756 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002757 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002758 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002759 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002760 napi->poll = poll;
2761 napi->weight = weight;
2762 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002763 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002764#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002765 spin_lock_init(&napi->poll_lock);
2766 napi->poll_owner = -1;
2767#endif
2768 set_bit(NAPI_STATE_SCHED, &napi->state);
2769}
2770EXPORT_SYMBOL(netif_napi_add);
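/*
 * Pairing sketch (hypothetical "foo" driver): how netif_napi_add()
 * is typically matched with a poll routine.  The contract shown
 * (return the work done, call napi_complete() only when under
 * budget) is the real one that net_rx_action() below relies on;
 * the foo_* helpers are made up, and weight 64 is the conventional
 * default.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work = foo_clean_rx_ring(priv, budget);
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			foo_enable_rx_irq(priv);
 *		}
 *		return work;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll, 64);
 */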
2771
2772void netif_napi_del(struct napi_struct *napi)
2773{
2774 struct sk_buff *skb, *next;
2775
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002776 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002777 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002778
2779 for (skb = napi->gro_list; skb; skb = next) {
2780 next = skb->next;
2781 skb->next = NULL;
2782 kfree_skb(skb);
2783 }
2784
2785 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002786 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002787}
2788EXPORT_SYMBOL(netif_napi_del);
2789
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002790
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791static void net_rx_action(struct softirq_action *h)
2792{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002793 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002794 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002795 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002796 void *have;
2797
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 local_irq_disable();
2799
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002800 while (!list_empty(list)) {
2801 struct napi_struct *n;
2802 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002804		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002805		 * Allow this to run for 2 jiffies, which allows
2806		 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002807 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002808 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 goto softnet_break;
2810
2811 local_irq_enable();
2812
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002813 /* Even though interrupts have been re-enabled, this
2814 * access is safe because interrupts can only add new
2815 * entries to the tail of this list, and only ->poll()
2816 * calls can remove this head entry from the list.
2817 */
2818 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002820 have = netpoll_poll_lock(n);
2821
2822 weight = n->weight;
2823
David S. Miller0a7606c2007-10-29 21:28:47 -07002824 /* This NAPI_STATE_SCHED test is for avoiding a race
2825 * with netpoll's poll_napi(). Only the entity which
2826 * obtains the lock and sees NAPI_STATE_SCHED set will
2827 * actually make the ->poll() call. Therefore we avoid
2828 * accidently calling ->poll() when NAPI is not scheduled.
2829 */
2830 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002831 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002832 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002833 trace_napi_poll(n);
2834 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002835
2836 WARN_ON_ONCE(work > weight);
2837
2838 budget -= work;
2839
2840 local_irq_disable();
2841
2842 /* Drivers must not modify the NAPI state if they
2843 * consume the entire weight. In such cases this code
2844 * still "owns" the NAPI instance and therefore can
2845 * move the instance around on the list at-will.
2846 */
David S. Millerfed17f32008-01-07 21:00:40 -08002847 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07002848 if (unlikely(napi_disable_pending(n))) {
2849 local_irq_enable();
2850 napi_complete(n);
2851 local_irq_disable();
2852 } else
David S. Millerfed17f32008-01-07 21:00:40 -08002853 list_move_tail(&n->poll_list, list);
2854 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002855
2856 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 }
2858out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002859 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002860
Chris Leechdb217332006-06-17 21:24:58 -07002861#ifdef CONFIG_NET_DMA
2862 /*
2863 * There may not be any more sk_buffs coming right now, so push
2864 * any pending DMA copies to hardware
2865 */
Dan Williams2ba05622009-01-06 11:38:14 -07002866 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002867#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002868
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869 return;
2870
2871softnet_break:
2872 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2873 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2874 goto out;
2875}
2876
2877static gifconf_func_t *gifconf_list[NPROTO];
2878
2879/**
2880 * register_gifconf - register a SIOCGIF handler
2881 * @family: Address family
2882 * @gifconf: Function handler
2883 *
2884 * Register protocol dependent address dumping routines. The handler
2885 * that is passed must not be freed or reused until it has been replaced
2886 * by another handler.
2887 */
2888int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2889{
2890 if (family >= NPROTO)
2891 return -EINVAL;
2892 gifconf_list[family] = gifconf;
2893 return 0;
2894}
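/*
 * Caller sketch: IPv4 registers its SIOCGIFCONF helper this way from
 * devinet_init().  A handler must either fill struct ifreq entries
 * into the supplied buffer or, when called with a NULL buffer, return
 * the space it would need (see dev_ifconf() below).
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 */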
2895
2896
2897/*
2898 * Map an interface index to its name (SIOCGIFNAME)
2899 */
2900
2901/*
2902 * We need this ioctl for efficient implementation of the
2903 * if_indextoname() function required by the IPv6 API. Without
2904 * it, we would have to search all the interfaces to find a
2905 * match. --pb
2906 */
2907
Eric W. Biederman881d9662007-09-17 11:56:21 -07002908static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909{
2910 struct net_device *dev;
2911 struct ifreq ifr;
2912
2913 /*
2914 * Fetch the caller's info block.
2915 */
2916
2917 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2918 return -EFAULT;
2919
2920 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002921 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 if (!dev) {
2923 read_unlock(&dev_base_lock);
2924 return -ENODEV;
2925 }
2926
2927 strcpy(ifr.ifr_name, dev->name);
2928 read_unlock(&dev_base_lock);
2929
2930 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2931 return -EFAULT;
2932 return 0;
2933}
2934
2935/*
2936 * Perform a SIOCGIFCONF call. This structure will change
2937 * size eventually, and there is nothing I can do about it.
2938 * Thus we will need a 'compatibility mode'.
2939 */
2940
Eric W. Biederman881d9662007-09-17 11:56:21 -07002941static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942{
2943 struct ifconf ifc;
2944 struct net_device *dev;
2945 char __user *pos;
2946 int len;
2947 int total;
2948 int i;
2949
2950 /*
2951 * Fetch the caller's info block.
2952 */
2953
2954 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2955 return -EFAULT;
2956
2957 pos = ifc.ifc_buf;
2958 len = ifc.ifc_len;
2959
2960 /*
2961 * Loop over the interfaces, and write an info block for each.
2962 */
2963
2964 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002965 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 for (i = 0; i < NPROTO; i++) {
2967 if (gifconf_list[i]) {
2968 int done;
2969 if (!pos)
2970 done = gifconf_list[i](dev, NULL, 0);
2971 else
2972 done = gifconf_list[i](dev, pos + total,
2973 len - total);
2974 if (done < 0)
2975 return -EFAULT;
2976 total += done;
2977 }
2978 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002979 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980
2981 /*
2982 * All done. Write the updated control block back to the caller.
2983 */
2984 ifc.ifc_len = total;
2985
2986 /*
2987 * Both BSD and Solaris return 0 here, so we do too.
2988 */
2989 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2990}
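/*
 * Userspace view (sketch, not kernel code): how SIOCGIFCONF is
 * usually consumed.  A NULL ifc_buf takes the !pos branch above and
 * returns only the length needed; error checking is omitted here.
 *
 *	struct ifconf ifc = { .ifc_len = 0, .ifc_buf = NULL };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	int i;
 *
 *	ioctl(fd, SIOCGIFCONF, &ifc);          (sizing pass)
 *	ifc.ifc_buf = malloc(ifc.ifc_len);
 *	ioctl(fd, SIOCGIFCONF, &ifc);          (fill pass)
 *	for (i = 0; i < ifc.ifc_len / sizeof(struct ifreq); i++)
 *		printf("%s\n", ifc.ifc_req[i].ifr_name);
 */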
2991
2992#ifdef CONFIG_PROC_FS
2993/*
2994 * This is invoked by the /proc filesystem handler to display a device
2995 * in detail.
2996 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002998 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999{
Denis V. Luneve372c412007-11-19 22:31:54 -08003000 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003001 loff_t off;
3002 struct net_device *dev;
3003
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003005 if (!*pos)
3006 return SEQ_START_TOKEN;
3007
3008 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003009 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003010 if (off++ == *pos)
3011 return dev;
3012
3013 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014}
3015
3016void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3017{
Denis V. Luneve372c412007-11-19 22:31:54 -08003018 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07003020 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07003021 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022}
3023
3024void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08003025 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026{
3027 read_unlock(&dev_base_lock);
3028}
3029
3030static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3031{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003032 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033
Rusty Russell5a1b5892007-04-28 21:04:03 -07003034 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3035 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3036 dev->name, stats->rx_bytes, stats->rx_packets,
3037 stats->rx_errors,
3038 stats->rx_dropped + stats->rx_missed_errors,
3039 stats->rx_fifo_errors,
3040 stats->rx_length_errors + stats->rx_over_errors +
3041 stats->rx_crc_errors + stats->rx_frame_errors,
3042 stats->rx_compressed, stats->multicast,
3043 stats->tx_bytes, stats->tx_packets,
3044 stats->tx_errors, stats->tx_dropped,
3045 stats->tx_fifo_errors, stats->collisions,
3046 stats->tx_carrier_errors +
3047 stats->tx_aborted_errors +
3048 stats->tx_window_errors +
3049 stats->tx_heartbeat_errors,
3050 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003051}
3052
3053/*
3055 * Called from the procfs module. This now uses the new arbitrary-sized
3056 * /proc/net interface to create /proc/net/dev.
3056 */
3057static int dev_seq_show(struct seq_file *seq, void *v)
3058{
3059 if (v == SEQ_START_TOKEN)
3060 seq_puts(seq, "Inter-| Receive "
3061 " | Transmit\n"
3062 " face |bytes packets errs drop fifo frame "
3063 "compressed multicast|bytes packets errs "
3064 "drop fifo colls carrier compressed\n");
3065 else
3066 dev_seq_printf_stats(seq, v);
3067 return 0;
3068}
3069
3070static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3071{
3072 struct netif_rx_stats *rc = NULL;
3073
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003074 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003075 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076 rc = &per_cpu(netdev_rx_stat, *pos);
3077 break;
3078 } else
3079 ++*pos;
3080 return rc;
3081}
3082
3083static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3084{
3085 return softnet_get_online(pos);
3086}
3087
3088static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3089{
3090 ++*pos;
3091 return softnet_get_online(pos);
3092}
3093
3094static void softnet_seq_stop(struct seq_file *seq, void *v)
3095{
3096}
3097
3098static int softnet_seq_show(struct seq_file *seq, void *v)
3099{
3100 struct netif_rx_stats *s = v;
3101
3102 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003103 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003104 0, 0, 0, 0, /* was fastroute */
3105 s->cpu_collision );
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106 return 0;
3107}
3108
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003109static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110 .start = dev_seq_start,
3111 .next = dev_seq_next,
3112 .stop = dev_seq_stop,
3113 .show = dev_seq_show,
3114};
3115
3116static int dev_seq_open(struct inode *inode, struct file *file)
3117{
Denis V. Luneve372c412007-11-19 22:31:54 -08003118 return seq_open_net(inode, file, &dev_seq_ops,
3119 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120}
3121
Arjan van de Ven9a321442007-02-12 00:55:35 -08003122static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003123 .owner = THIS_MODULE,
3124 .open = dev_seq_open,
3125 .read = seq_read,
3126 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003127 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128};
3129
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003130static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003131 .start = softnet_seq_start,
3132 .next = softnet_seq_next,
3133 .stop = softnet_seq_stop,
3134 .show = softnet_seq_show,
3135};
3136
3137static int softnet_seq_open(struct inode *inode, struct file *file)
3138{
3139 return seq_open(file, &softnet_seq_ops);
3140}
3141
Arjan van de Ven9a321442007-02-12 00:55:35 -08003142static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143 .owner = THIS_MODULE,
3144 .open = softnet_seq_open,
3145 .read = seq_read,
3146 .llseek = seq_lseek,
3147 .release = seq_release,
3148};
3149
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003150static void *ptype_get_idx(loff_t pos)
3151{
3152 struct packet_type *pt = NULL;
3153 loff_t i = 0;
3154 int t;
3155
3156 list_for_each_entry_rcu(pt, &ptype_all, list) {
3157 if (i == pos)
3158 return pt;
3159 ++i;
3160 }
3161
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003162 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003163 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3164 if (i == pos)
3165 return pt;
3166 ++i;
3167 }
3168 }
3169 return NULL;
3170}
3171
3172static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003173 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003174{
3175 rcu_read_lock();
3176 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3177}
3178
3179static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3180{
3181 struct packet_type *pt;
3182 struct list_head *nxt;
3183 int hash;
3184
3185 ++*pos;
3186 if (v == SEQ_START_TOKEN)
3187 return ptype_get_idx(0);
3188
3189 pt = v;
3190 nxt = pt->list.next;
3191 if (pt->type == htons(ETH_P_ALL)) {
3192 if (nxt != &ptype_all)
3193 goto found;
3194 hash = 0;
3195 nxt = ptype_base[0].next;
3196 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003197 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003198
3199 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003200 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003201 return NULL;
3202 nxt = ptype_base[hash].next;
3203 }
3204found:
3205 return list_entry(nxt, struct packet_type, list);
3206}
3207
3208static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003209 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003210{
3211 rcu_read_unlock();
3212}
3213
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003214static int ptype_seq_show(struct seq_file *seq, void *v)
3215{
3216 struct packet_type *pt = v;
3217
3218 if (v == SEQ_START_TOKEN)
3219 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003220 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003221 if (pt->type == htons(ETH_P_ALL))
3222 seq_puts(seq, "ALL ");
3223 else
3224 seq_printf(seq, "%04x", ntohs(pt->type));
3225
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003226 seq_printf(seq, " %-8s %pF\n",
3227 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003228 }
3229
3230 return 0;
3231}
3232
3233static const struct seq_operations ptype_seq_ops = {
3234 .start = ptype_seq_start,
3235 .next = ptype_seq_next,
3236 .stop = ptype_seq_stop,
3237 .show = ptype_seq_show,
3238};
3239
3240static int ptype_seq_open(struct inode *inode, struct file *file)
3241{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003242 return seq_open_net(inode, file, &ptype_seq_ops,
3243 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003244}
3245
3246static const struct file_operations ptype_seq_fops = {
3247 .owner = THIS_MODULE,
3248 .open = ptype_seq_open,
3249 .read = seq_read,
3250 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003251 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003252};
3253
3254
Pavel Emelyanov46650792007-10-08 20:38:39 -07003255static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256{
3257 int rc = -ENOMEM;
3258
Eric W. Biederman881d9662007-09-17 11:56:21 -07003259 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003261 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003263 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003264 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003265
Eric W. Biederman881d9662007-09-17 11:56:21 -07003266 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003267 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003268 rc = 0;
3269out:
3270 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003271out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003272 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003274 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003275out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003276 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277 goto out;
3278}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003279
Pavel Emelyanov46650792007-10-08 20:38:39 -07003280static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003281{
3282 wext_proc_exit(net);
3283
3284 proc_net_remove(net, "ptype");
3285 proc_net_remove(net, "softnet_stat");
3286 proc_net_remove(net, "dev");
3287}
3288
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003289static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003290 .init = dev_proc_net_init,
3291 .exit = dev_proc_net_exit,
3292};
3293
3294static int __init dev_proc_init(void)
3295{
3296 return register_pernet_subsys(&dev_proc_ops);
3297}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003298#else
3299#define dev_proc_init() 0
3300#endif /* CONFIG_PROC_FS */
3301
3302
3303/**
3304 * netdev_set_master - set up master/slave pair
3305 * @slave: slave device
3306 * @master: new master device
3307 *
3308 * Changes the master device of the slave. Pass %NULL to break the
3309 * bonding. The caller must hold the RTNL semaphore. On a failure
3310 * a negative errno code is returned. On success the reference counts
3311 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3312 * function returns zero.
3313 */
3314int netdev_set_master(struct net_device *slave, struct net_device *master)
3315{
3316 struct net_device *old = slave->master;
3317
3318 ASSERT_RTNL();
3319
3320 if (master) {
3321 if (old)
3322 return -EBUSY;
3323 dev_hold(master);
3324 }
3325
3326 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003327
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328 synchronize_net();
3329
3330 if (old)
3331 dev_put(old);
3332
3333 if (master)
3334 slave->flags |= IFF_SLAVE;
3335 else
3336 slave->flags &= ~IFF_SLAVE;
3337
3338 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3339 return 0;
3340}
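/*
 * Caller sketch: the bonding driver enslaves and releases devices
 * through this helper (simplified, error handling trimmed); RTNL is
 * already held on the ioctl and sysfs paths that reach it.
 *
 *	err = netdev_set_master(slave_dev, bond_dev);    (enslave)
 *	...
 *	netdev_set_master(slave_dev, NULL);              (release)
 */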
3341
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003342static void dev_change_rx_flags(struct net_device *dev, int flags)
3343{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003344 const struct net_device_ops *ops = dev->netdev_ops;
3345
3346 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3347 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003348}
3349
Wang Chendad9b332008-06-18 01:48:28 -07003350static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003351{
3352 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003353 uid_t uid;
3354 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003355
Patrick McHardy24023452007-07-14 18:51:31 -07003356 ASSERT_RTNL();
3357
Wang Chendad9b332008-06-18 01:48:28 -07003358 dev->flags |= IFF_PROMISC;
3359 dev->promiscuity += inc;
3360 if (dev->promiscuity == 0) {
3361 /*
3362 * Avoid overflow.
3363		 * If inc causes an overflow, leave promisc untouched and return an error.
3364 */
3365 if (inc < 0)
3366 dev->flags &= ~IFF_PROMISC;
3367 else {
3368 dev->promiscuity -= inc;
3369 printk(KERN_WARNING "%s: promiscuity touches roof, "
3370 "set promiscuity failed, promiscuity feature "
3371 "of device might be broken.\n", dev->name);
3372 return -EOVERFLOW;
3373 }
3374 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003375 if (dev->flags != old_flags) {
3376 printk(KERN_INFO "device %s %s promiscuous mode\n",
3377 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3378 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003379 if (audit_enabled) {
3380 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003381 audit_log(current->audit_context, GFP_ATOMIC,
3382 AUDIT_ANOM_PROMISCUOUS,
3383 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3384 dev->name, (dev->flags & IFF_PROMISC),
3385 (old_flags & IFF_PROMISC),
3386 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003387 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003388 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003389 }
Patrick McHardy24023452007-07-14 18:51:31 -07003390
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003391 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003392 }
Wang Chendad9b332008-06-18 01:48:28 -07003393 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003394}
3395
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396/**
3397 * dev_set_promiscuity - update promiscuity count on a device
3398 * @dev: device
3399 * @inc: modifier
3400 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003401 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003402 * remains above zero the interface remains promiscuous. Once it hits zero
3403 * the device reverts back to normal filtering operation. A negative inc
3404 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003405 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406 */
Wang Chendad9b332008-06-18 01:48:28 -07003407int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003408{
3409 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003410 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411
Wang Chendad9b332008-06-18 01:48:28 -07003412 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003413 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003414 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003415 if (dev->flags != old_flags)
3416 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003417 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003418}
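/*
 * Caller sketch: packet sockets take one promiscuity reference per
 * listening socket and drop it symmetrically, so several sniffers
 * can overlap without fighting over the flag:
 *
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, 1);     (capture starts)
 *	...
 *	dev_set_promiscuity(dev, -1);    (capture ends)
 *	rtnl_unlock();
 */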
3419
3420/**
3421 * dev_set_allmulti - update allmulti count on a device
3422 * @dev: device
3423 * @inc: modifier
3424 *
3425 * Add or remove reception of all multicast frames to a device. While the
3426 * count in the device remains above zero the interface remains listening
3427 * to all multicast frames. Once it hits zero the device reverts back to normal
3428 * filtering operation. A negative @inc value is used to drop the counter
3429 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003430 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431 */
3432
Wang Chendad9b332008-06-18 01:48:28 -07003433int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434{
3435 unsigned short old_flags = dev->flags;
3436
Patrick McHardy24023452007-07-14 18:51:31 -07003437 ASSERT_RTNL();
3438
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003440 dev->allmulti += inc;
3441 if (dev->allmulti == 0) {
3442 /*
3443 * Avoid overflow.
3444		 * If inc causes an overflow, leave allmulti untouched and return an error.
3445 */
3446 if (inc < 0)
3447 dev->flags &= ~IFF_ALLMULTI;
3448 else {
3449 dev->allmulti -= inc;
3450 printk(KERN_WARNING "%s: allmulti touches roof, "
3451 "set allmulti failed, allmulti feature of "
3452 "device might be broken.\n", dev->name);
3453 return -EOVERFLOW;
3454 }
3455 }
Patrick McHardy24023452007-07-14 18:51:31 -07003456 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003457 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003458 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003459 }
Wang Chendad9b332008-06-18 01:48:28 -07003460 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003461}
3462
3463/*
3464 * Upload unicast and multicast address lists to device and
3465 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003466 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003467 * are present.
3468 */
3469void __dev_set_rx_mode(struct net_device *dev)
3470{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003471 const struct net_device_ops *ops = dev->netdev_ops;
3472
Patrick McHardy4417da62007-06-27 01:28:10 -07003473 /* dev_open will call this function so the list will stay sane. */
3474 if (!(dev->flags&IFF_UP))
3475 return;
3476
3477 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003478 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003479
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003480 if (ops->ndo_set_rx_mode)
3481 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003482 else {
3483 /* Unicast addresses changes may only happen under the rtnl,
3484 * therefore calling __dev_set_promiscuity here is safe.
3485 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003486 if (dev->uc.count > 0 && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003487 __dev_set_promiscuity(dev, 1);
3488 dev->uc_promisc = 1;
Jiri Pirko31278e72009-06-17 01:12:19 +00003489 } else if (dev->uc.count == 0 && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003490 __dev_set_promiscuity(dev, -1);
3491 dev->uc_promisc = 0;
3492 }
3493
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003494 if (ops->ndo_set_multicast_list)
3495 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003496 }
3497}
3498
3499void dev_set_rx_mode(struct net_device *dev)
3500{
David S. Millerb9e40852008-07-15 00:15:08 -07003501 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003502 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003503 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003504}
3505
Jiri Pirkof001fde2009-05-05 02:48:28 +00003506/* hw address list handling functions */
3507
Jiri Pirko31278e72009-06-17 01:12:19 +00003508static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3509 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003510{
3511 struct netdev_hw_addr *ha;
3512 int alloc_size;
3513
3514 if (addr_len > MAX_ADDR_LEN)
3515 return -EINVAL;
3516
Jiri Pirko31278e72009-06-17 01:12:19 +00003517 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003518 if (!memcmp(ha->addr, addr, addr_len) &&
3519 ha->type == addr_type) {
3520 ha->refcount++;
3521 return 0;
3522 }
3523 }
3524
3525
Jiri Pirkof001fde2009-05-05 02:48:28 +00003526 alloc_size = sizeof(*ha);
3527 if (alloc_size < L1_CACHE_BYTES)
3528 alloc_size = L1_CACHE_BYTES;
3529 ha = kmalloc(alloc_size, GFP_ATOMIC);
3530 if (!ha)
3531 return -ENOMEM;
3532 memcpy(ha->addr, addr, addr_len);
3533 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003534 ha->refcount = 1;
3535 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003536 list_add_tail_rcu(&ha->list, &list->list);
3537 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003538 return 0;
3539}
3540
3541static void ha_rcu_free(struct rcu_head *head)
3542{
3543 struct netdev_hw_addr *ha;
3544
3545 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3546 kfree(ha);
3547}
3548
Jiri Pirko31278e72009-06-17 01:12:19 +00003549static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3550 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003551{
3552 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003553
Jiri Pirko31278e72009-06-17 01:12:19 +00003554 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003555 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003556 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003557 if (--ha->refcount)
3558 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003559 list_del_rcu(&ha->list);
3560 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003561 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003562 return 0;
3563 }
3564 }
3565 return -ENOENT;
3566}
3567
Jiri Pirko31278e72009-06-17 01:12:19 +00003568static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3569 struct netdev_hw_addr_list *from_list,
3570 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003571 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003572{
3573 int err;
3574 struct netdev_hw_addr *ha, *ha2;
3575 unsigned char type;
3576
Jiri Pirko31278e72009-06-17 01:12:19 +00003577 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003578 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003579 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003580 if (err)
3581 goto unroll;
3582 }
3583 return 0;
3584
3585unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00003586 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003587 if (ha2 == ha)
3588 break;
3589 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003590 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003591 }
3592 return err;
3593}
3594
Jiri Pirko31278e72009-06-17 01:12:19 +00003595static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3596 struct netdev_hw_addr_list *from_list,
3597 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003598 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003599{
3600 struct netdev_hw_addr *ha;
3601 unsigned char type;
3602
Jiri Pirko31278e72009-06-17 01:12:19 +00003603 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003604 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003605		__hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003606 }
3607}
3608
Jiri Pirko31278e72009-06-17 01:12:19 +00003609static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3610 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003611 int addr_len)
3612{
3613 int err = 0;
3614 struct netdev_hw_addr *ha, *tmp;
3615
Jiri Pirko31278e72009-06-17 01:12:19 +00003616 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003617 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003618 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003619 addr_len, ha->type);
3620 if (err)
3621 break;
3622 ha->synced = true;
3623 ha->refcount++;
3624 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003625 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3626 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003627 }
3628 }
3629 return err;
3630}
3631
Jiri Pirko31278e72009-06-17 01:12:19 +00003632static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3633 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003634 int addr_len)
3635{
3636 struct netdev_hw_addr *ha, *tmp;
3637
Jiri Pirko31278e72009-06-17 01:12:19 +00003638 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003639 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003640 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003641 addr_len, ha->type);
3642 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003643 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003644 addr_len, ha->type);
3645 }
3646 }
3647}
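/*
 * Refcount walkthrough (informal): the first sync of address A from
 * list F to list T adds A to T, marks it synced and raises its
 * refcount in F to 2.  When the last real user in F goes away the
 * refcount drops back to 1, and the next __hw_addr_sync() pass then
 * removes A from both lists; __hw_addr_unsync() is the explicit
 * teardown of the same pairing:
 *
 *	__hw_addr_sync(&to->uc, &from->uc, from->addr_len);     (mirror)
 *	...
 *	__hw_addr_unsync(&to->uc, &from->uc, from->addr_len);   (undo)
 */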
3648
Jiri Pirko31278e72009-06-17 01:12:19 +00003649static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003650{
3651 struct netdev_hw_addr *ha, *tmp;
3652
Jiri Pirko31278e72009-06-17 01:12:19 +00003653 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003654 list_del_rcu(&ha->list);
3655 call_rcu(&ha->rcu_head, ha_rcu_free);
3656 }
Jiri Pirko31278e72009-06-17 01:12:19 +00003657 list->count = 0;
3658}
3659
3660static void __hw_addr_init(struct netdev_hw_addr_list *list)
3661{
3662 INIT_LIST_HEAD(&list->list);
3663 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003664}
3665
3666/* Device addresses handling functions */
3667
3668static void dev_addr_flush(struct net_device *dev)
3669{
3670 /* rtnl_mutex must be held here */
3671
Jiri Pirko31278e72009-06-17 01:12:19 +00003672 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003673 dev->dev_addr = NULL;
3674}
3675
3676static int dev_addr_init(struct net_device *dev)
3677{
3678 unsigned char addr[MAX_ADDR_LEN];
3679 struct netdev_hw_addr *ha;
3680 int err;
3681
3682 /* rtnl_mutex must be held here */
3683
Jiri Pirko31278e72009-06-17 01:12:19 +00003684 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00003685 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00003686 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003687 NETDEV_HW_ADDR_T_LAN);
3688 if (!err) {
3689 /*
3690 * Get the first (previously created) address from the list
3691 * and set dev_addr pointer to this location.
3692 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003693 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003694 struct netdev_hw_addr, list);
3695 dev->dev_addr = ha->addr;
3696 }
3697 return err;
3698}
3699
3700/**
3701 * dev_addr_add - Add a device address
3702 * @dev: device
3703 * @addr: address to add
3704 * @addr_type: address type
3705 *
3706 * Add a device address to the device or increase the reference count if
3707 * it already exists.
3708 *
3709 * The caller must hold the rtnl_mutex.
3710 */
3711int dev_addr_add(struct net_device *dev, unsigned char *addr,
3712 unsigned char addr_type)
3713{
3714 int err;
3715
3716 ASSERT_RTNL();
3717
Jiri Pirko31278e72009-06-17 01:12:19 +00003718 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003719 if (!err)
3720 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3721 return err;
3722}
3723EXPORT_SYMBOL(dev_addr_add);
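/*
 * Caller sketch: a driver exposing an extra hardware address, e.g. an
 * FCoE-capable NIC publishing its SAN MAC, adds it under RTNL.  The
 * san_mac_addr buffer is hypothetical; the type constant is real:
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, san_mac_addr, NETDEV_HW_ADDR_T_SAN);
 *	rtnl_unlock();
 */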
3724
3725/**
3726 * dev_addr_del - Release a device address.
3727 * @dev: device
3728 * @addr: address to delete
3729 * @addr_type: address type
3730 *
3731 * Release reference to a device address and remove it from the device
3732 * if the reference count drops to zero.
3733 *
3734 * The caller must hold the rtnl_mutex.
3735 */
3736int dev_addr_del(struct net_device *dev, unsigned char *addr,
3737 unsigned char addr_type)
3738{
3739 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003740 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003741
3742 ASSERT_RTNL();
3743
Jiri Pirkoccffad252009-05-22 23:22:17 +00003744 /*
3745	 * We cannot remove the first address from the list because
3746	 * dev->dev_addr points to it.
3747 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003748 ha = list_first_entry(&dev->dev_addrs.list,
3749 struct netdev_hw_addr, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003750 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3751 return -ENOENT;
3752
Jiri Pirko31278e72009-06-17 01:12:19 +00003753 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003754 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003755 if (!err)
3756 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3757 return err;
3758}
3759EXPORT_SYMBOL(dev_addr_del);
3760
3761/**
3762 * dev_addr_add_multiple - Add device addresses from another device
3763 * @to_dev: device to which addresses will be added
3764 * @from_dev: device from which addresses will be added
3765 *	@addr_type: address type - 0 means the type is taken from from_dev
3766 *
3767 *	Add the device addresses of one device to another.
3768 *
3769 * The caller must hold the rtnl_mutex.
3770 */
3771int dev_addr_add_multiple(struct net_device *to_dev,
3772 struct net_device *from_dev,
3773 unsigned char addr_type)
3774{
3775 int err;
3776
3777 ASSERT_RTNL();
3778
3779 if (from_dev->addr_len != to_dev->addr_len)
3780 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003781 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003782 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003783 if (!err)
3784 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3785 return err;
3786}
3787EXPORT_SYMBOL(dev_addr_add_multiple);
3788
3789/**
3790 * dev_addr_del_multiple - Delete device addresses by another device
3791 * @to_dev: device where the addresses will be deleted
3792 *	@from_dev: device supplying the list of addresses to delete
3793 *	@addr_type: address type - 0 means the type is taken from from_dev
3794 *
3795 *	Deletes the addresses in @to_dev that appear in @from_dev's list.
3796 *
3797 * The caller must hold the rtnl_mutex.
3798 */
3799int dev_addr_del_multiple(struct net_device *to_dev,
3800 struct net_device *from_dev,
3801 unsigned char addr_type)
3802{
3803 ASSERT_RTNL();
3804
3805 if (from_dev->addr_len != to_dev->addr_len)
3806 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003807 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003808 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003809 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3810 return 0;
3811}
3812EXPORT_SYMBOL(dev_addr_del_multiple);
3813
Jiri Pirko31278e72009-06-17 01:12:19 +00003814/* multicast address list handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00003815
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003816int __dev_addr_delete(struct dev_addr_list **list, int *count,
3817 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003818{
3819 struct dev_addr_list *da;
3820
3821 for (; (da = *list) != NULL; list = &da->next) {
3822 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3823 alen == da->da_addrlen) {
3824 if (glbl) {
3825 int old_glbl = da->da_gusers;
3826 da->da_gusers = 0;
3827 if (old_glbl == 0)
3828 break;
3829 }
3830 if (--da->da_users)
3831 return 0;
3832
3833 *list = da->next;
3834 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003835 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003836 return 0;
3837 }
3838 }
3839 return -ENOENT;
3840}
3841
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003842int __dev_addr_add(struct dev_addr_list **list, int *count,
3843 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003844{
3845 struct dev_addr_list *da;
3846
3847 for (da = *list; da != NULL; da = da->next) {
3848 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3849 da->da_addrlen == alen) {
3850 if (glbl) {
3851 int old_glbl = da->da_gusers;
3852 da->da_gusers = 1;
3853 if (old_glbl)
3854 return 0;
3855 }
3856 da->da_users++;
3857 return 0;
3858 }
3859 }
3860
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003861 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003862 if (da == NULL)
3863 return -ENOMEM;
3864 memcpy(da->da_addr, addr, alen);
3865 da->da_addrlen = alen;
3866 da->da_users = 1;
3867 da->da_gusers = glbl ? 1 : 0;
3868 da->next = *list;
3869 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003870 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003871 return 0;
3872}
3873
Patrick McHardy4417da62007-06-27 01:28:10 -07003874/**
3875 * dev_unicast_delete - Release secondary unicast address.
3876 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003877 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07003878 *
3879 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003880 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003881 *
3882 * The caller must hold the rtnl_mutex.
3883 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003884int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003885{
3886 int err;
3887
3888 ASSERT_RTNL();
3889
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003890 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00003891 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3892 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003893 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003894 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003895 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003896 return err;
3897}
3898EXPORT_SYMBOL(dev_unicast_delete);
3899
3900/**
3901 * dev_unicast_add - add a secondary unicast address
3902 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003903 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07003904 *
3905 * Add a secondary unicast address to the device or increase
3906 * the reference count if it already exists.
3907 *
3908 * The caller must hold the rtnl_mutex.
3909 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003910int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003911{
3912 int err;
3913
3914 ASSERT_RTNL();
3915
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003916 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00003917 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
3918 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003919 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003920 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003921 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003922 return err;
3923}
3924EXPORT_SYMBOL(dev_unicast_add);
3925
Chris Leeche83a2ea2008-01-31 16:53:23 -08003926int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3927 struct dev_addr_list **from, int *from_count)
3928{
3929 struct dev_addr_list *da, *next;
3930 int err = 0;
3931
3932 da = *from;
3933 while (da != NULL) {
3934 next = da->next;
3935 if (!da->da_synced) {
3936 err = __dev_addr_add(to, to_count,
3937 da->da_addr, da->da_addrlen, 0);
3938 if (err < 0)
3939 break;
3940 da->da_synced = 1;
3941 da->da_users++;
3942 } else if (da->da_users == 1) {
3943 __dev_addr_delete(to, to_count,
3944 da->da_addr, da->da_addrlen, 0);
3945 __dev_addr_delete(from, from_count,
3946 da->da_addr, da->da_addrlen, 0);
3947 }
3948 da = next;
3949 }
3950 return err;
3951}
Johannes Bergc4029082009-06-17 17:43:30 +02003952EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003953
3954void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3955 struct dev_addr_list **from, int *from_count)
3956{
3957 struct dev_addr_list *da, *next;
3958
3959 da = *from;
3960 while (da != NULL) {
3961 next = da->next;
3962 if (da->da_synced) {
3963 __dev_addr_delete(to, to_count,
3964 da->da_addr, da->da_addrlen, 0);
3965 da->da_synced = 0;
3966 __dev_addr_delete(from, from_count,
3967 da->da_addr, da->da_addrlen, 0);
3968 }
3969 da = next;
3970 }
3971}
Johannes Bergc4029082009-06-17 17:43:30 +02003972EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003973
3974/**
3975 * dev_unicast_sync - Synchronize device's unicast list to another device
3976 * @to: destination device
3977 * @from: source device
3978 *
3979 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003980 * addresses that have no users left. The source device must be
3981 *	locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08003982 *
3983 * This function is intended to be called from the dev->set_rx_mode
3984 * function of layered software devices.
3985 */
3986int dev_unicast_sync(struct net_device *to, struct net_device *from)
3987{
3988 int err = 0;
3989
Jiri Pirkoccffad252009-05-22 23:22:17 +00003990 if (to->addr_len != from->addr_len)
3991 return -EINVAL;
3992
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003993 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00003994 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003995 if (!err)
3996 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003997 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003998 return err;
3999}
4000EXPORT_SYMBOL(dev_unicast_sync);
4001
4002/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004003 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004004 * @to: destination device
4005 * @from: source device
4006 *
4007 * Remove all addresses that were added to the destination device by
4008 * dev_unicast_sync(). This function is intended to be called from the
4009 * dev->stop function of layered software devices.
4010 */
4011void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4012{
Jiri Pirkoccffad252009-05-22 23:22:17 +00004013 if (to->addr_len != from->addr_len)
4014 return;
4015
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004016 netif_addr_lock_bh(from);
4017 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004018 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004019 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004020 netif_addr_unlock(to);
4021 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004022}
4023EXPORT_SYMBOL(dev_unicast_unsync);
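/*
 * Layered-device sketch (hypothetical "foo" upper device): sync runs
 * from its ndo_set_rx_mode, where the upper device's address list is
 * already locked, and unsync runs from its ndo_stop.  foo_lower_dev()
 * stands in for however the driver finds its lower device:
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		dev_unicast_sync(foo_lower_dev(dev), dev);
 *	}
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		dev_unicast_unsync(foo_lower_dev(dev), dev);
 *		return 0;
 *	}
 */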
4024
Jiri Pirkoccffad252009-05-22 23:22:17 +00004025static void dev_unicast_flush(struct net_device *dev)
4026{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004027 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004028 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004029 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004030}
4031
4032static void dev_unicast_init(struct net_device *dev)
4033{
Jiri Pirko31278e72009-06-17 01:12:19 +00004034 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004035}
4036
4037
Denis Cheng12972622007-07-18 02:12:56 -07004038static void __dev_addr_discard(struct dev_addr_list **list)
4039{
4040 struct dev_addr_list *tmp;
4041
4042 while (*list != NULL) {
4043 tmp = *list;
4044 *list = tmp->next;
4045 if (tmp->da_users > tmp->da_gusers)
4046 printk("__dev_addr_discard: address leakage! "
4047 "da_users=%d\n", tmp->da_users);
4048 kfree(tmp);
4049 }
4050}
4051
Denis Cheng26cc2522007-07-18 02:12:03 -07004052static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004053{
David S. Millerb9e40852008-07-15 00:15:08 -07004054 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004055
Denis Cheng456ad752007-07-18 02:10:54 -07004056 __dev_addr_discard(&dev->mc_list);
4057 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004058
David S. Millerb9e40852008-07-15 00:15:08 -07004059 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004060}
4061
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004062/**
4063 * dev_get_flags - get flags reported to userspace
4064 * @dev: device
4065 *
4066 * Get the combination of flag bits exported through APIs to userspace.
4067 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068unsigned dev_get_flags(const struct net_device *dev)
4069{
4070 unsigned flags;
4071
4072 flags = (dev->flags & ~(IFF_PROMISC |
4073 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004074 IFF_RUNNING |
4075 IFF_LOWER_UP |
4076 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077 (dev->gflags & (IFF_PROMISC |
4078 IFF_ALLMULTI));
4079
Stefan Rompfb00055a2006-03-20 17:09:11 -08004080 if (netif_running(dev)) {
4081 if (netif_oper_up(dev))
4082 flags |= IFF_RUNNING;
4083 if (netif_carrier_ok(dev))
4084 flags |= IFF_LOWER_UP;
4085 if (netif_dormant(dev))
4086 flags |= IFF_DORMANT;
4087 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088
4089 return flags;
4090}
4091
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004092/**
4093 * dev_change_flags - change device settings
4094 * @dev: device
4095 * @flags: device state flags
4096 *
4097 * Change settings on device based state flags. The flags are
4098 * in the userspace exported format.
4099 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100int dev_change_flags(struct net_device *dev, unsigned flags)
4101{
Thomas Graf7c355f52007-06-05 16:03:03 -07004102 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103 int old_flags = dev->flags;
4104
Patrick McHardy24023452007-07-14 18:51:31 -07004105 ASSERT_RTNL();
4106
Linus Torvalds1da177e2005-04-16 15:20:36 -07004107 /*
4108 * Set the flags on our device.
4109 */
4110
4111 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4112 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4113 IFF_AUTOMEDIA)) |
4114 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4115 IFF_ALLMULTI));
4116
4117 /*
4118 * Load in the correct multicast list now the flags have changed.
4119 */
4120
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004121 if ((old_flags ^ flags) & IFF_MULTICAST)
4122 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004123
Patrick McHardy4417da62007-06-27 01:28:10 -07004124 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125
4126 /*
4127	 *	Have we downed the interface? We handle IFF_UP ourselves
4128 * according to user attempts to set it, rather than blindly
4129 * setting it.
4130 */
4131
4132 ret = 0;
4133 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4134 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4135
4136 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004137 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004138 }
4139
4140 if (dev->flags & IFF_UP &&
4141 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4142 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004143 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004144
4145 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4146 int inc = (flags & IFF_PROMISC) ? +1 : -1;
4147 dev->gflags ^= IFF_PROMISC;
4148 dev_set_promiscuity(dev, inc);
4149 }
4150
4151 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4152	   is important. Some (broken) drivers set IFF_PROMISC when
4153	   IFF_ALLMULTI is requested, without asking us and without reporting.
4154 */
4155 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4156 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
4157 dev->gflags ^= IFF_ALLMULTI;
4158 dev_set_allmulti(dev, inc);
4159 }
4160
Thomas Graf7c355f52007-06-05 16:03:03 -07004161 /* Exclude state transition flags, already notified */
4162 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4163 if (changes)
4164 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165
4166 return ret;
4167}
4168
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004169/**
4170 * dev_set_mtu - Change maximum transfer unit
4171 * @dev: device
4172 * @new_mtu: new transfer unit
4173 *
4174 * Change the maximum transfer size of the network device.
4175 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176int dev_set_mtu(struct net_device *dev, int new_mtu)
4177{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004178 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179 int err;
4180
4181 if (new_mtu == dev->mtu)
4182 return 0;
4183
4184 /* MTU must be positive. */
4185 if (new_mtu < 0)
4186 return -EINVAL;
4187
4188 if (!netif_device_present(dev))
4189 return -ENODEV;
4190
4191 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004192 if (ops->ndo_change_mtu)
4193 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 else
4195 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004196
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004198 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004199 return err;
4200}
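/*
 * Caller sketch: this is the backend of SIOCSIFMTU and of stacked
 * drivers propagating an MTU change downward:
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);    (request jumbo frames)
 *	rtnl_unlock();
 *
 * The device's ndo_change_mtu may still reject values beyond its
 * hardware limits, so err must be checked.
 */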
4201
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004202/**
4203 * dev_set_mac_address - Change Media Access Control Address
4204 * @dev: device
4205 * @sa: new address
4206 *
4207 * Change the hardware (MAC) address of the device
4208 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4210{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004211 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212 int err;
4213
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004214 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215 return -EOPNOTSUPP;
4216 if (sa->sa_family != dev->type)
4217 return -EINVAL;
4218 if (!netif_device_present(dev))
4219 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004220 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004222 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223 return err;
4224}
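
/*
 * Illustrative sketch, not part of this file: building the sockaddr
 * that dev_set_mac_address() expects. The address bytes are invented
 * (a locally administered address); sa_family must match dev->type,
 * and RTNL must be held by the caller.
 *
 *	static int set_example_mac(struct net_device *dev)
 *	{
 *		static const u8 addr[ETH_ALEN] = {
 *			0x02, 0x00, 0x00, 0x00, 0x00, 0x01
 *		};
 *		struct sockaddr sa;
 *		int err;
 *
 *		sa.sa_family = dev->type;
 *		memcpy(sa.sa_data, addr, ETH_ALEN);
 *		rtnl_lock();
 *		err = dev_set_mac_address(dev, &sa);
 *		rtnl_unlock();
 *		return err;
 *	}
 */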
4225
4226/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07004227 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004229static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230{
4231 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004232 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233
4234 if (!dev)
4235 return -ENODEV;
4236
4237 switch (cmd) {
4238 case SIOCGIFFLAGS: /* Get interface flags */
John Dykstra746e6ad2009-06-11 20:57:21 -07004239 ifr->ifr_flags = (short) dev_get_flags(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004240 return 0;
4241
Linus Torvalds1da177e2005-04-16 15:20:36 -07004242 case SIOCGIFMETRIC: /* Get the metric on the interface
4243 (currently unused) */
4244 ifr->ifr_metric = 0;
4245 return 0;
4246
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 case SIOCGIFMTU: /* Get the MTU of a device */
4248 ifr->ifr_mtu = dev->mtu;
4249 return 0;
4250
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251 case SIOCGIFHWADDR:
4252 if (!dev->addr_len)
4253 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4254 else
4255 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4256 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4257 ifr->ifr_hwaddr.sa_family = dev->type;
4258 return 0;
4259
Jeff Garzik14e3e072007-10-08 00:06:32 -07004260 case SIOCGIFSLAVE:
4261 err = -EINVAL;
4262 break;
4263
4264 case SIOCGIFMAP:
4265 ifr->ifr_map.mem_start = dev->mem_start;
4266 ifr->ifr_map.mem_end = dev->mem_end;
4267 ifr->ifr_map.base_addr = dev->base_addr;
4268 ifr->ifr_map.irq = dev->irq;
4269 ifr->ifr_map.dma = dev->dma;
4270 ifr->ifr_map.port = dev->if_port;
4271 return 0;
4272
4273 case SIOCGIFINDEX:
4274 ifr->ifr_ifindex = dev->ifindex;
4275 return 0;
4276
4277 case SIOCGIFTXQLEN:
4278 ifr->ifr_qlen = dev->tx_queue_len;
4279 return 0;
4280
4281 default:
4282 /* dev_ioctl() should ensure this case
4283 * is never reached
4284 */
4285 WARN_ON(1);
4286 err = -EINVAL;
4287 break;
4288
4289 }
4290 return err;
4291}
4292
4293/*
4294 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4295 */
4296static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4297{
4298 int err;
4299 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004300 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004301
4302 if (!dev)
4303 return -ENODEV;
4304
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004305 ops = dev->netdev_ops;
4306
Jeff Garzik14e3e072007-10-08 00:06:32 -07004307 switch (cmd) {
4308 case SIOCSIFFLAGS: /* Set interface flags */
4309 return dev_change_flags(dev, ifr->ifr_flags);
4310
4311 case SIOCSIFMETRIC: /* Set the metric on the interface
4312 (currently unused) */
4313 return -EOPNOTSUPP;
4314
4315 case SIOCSIFMTU: /* Set the MTU of a device */
4316 return dev_set_mtu(dev, ifr->ifr_mtu);
4317
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318 case SIOCSIFHWADDR:
4319 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4320
4321 case SIOCSIFHWBROADCAST:
4322 if (ifr->ifr_hwaddr.sa_family != dev->type)
4323 return -EINVAL;
4324 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4325 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004326 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004327 return 0;
4328
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329 case SIOCSIFMAP:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004330 if (ops->ndo_set_config) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004331 if (!netif_device_present(dev))
4332 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004333 return ops->ndo_set_config(dev, &ifr->ifr_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334 }
4335 return -EOPNOTSUPP;
4336
4337 case SIOCADDMULTI:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004338 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004339 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4340 return -EINVAL;
4341 if (!netif_device_present(dev))
4342 return -ENODEV;
4343 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4344 dev->addr_len, 1);
4345
4346 case SIOCDELMULTI:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004347 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4349 return -EINVAL;
4350 if (!netif_device_present(dev))
4351 return -ENODEV;
4352 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4353 dev->addr_len, 1);
4354
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355 case SIOCSIFTXQLEN:
4356 if (ifr->ifr_qlen < 0)
4357 return -EINVAL;
4358 dev->tx_queue_len = ifr->ifr_qlen;
4359 return 0;
4360
4361 case SIOCSIFNAME:
4362 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4363 return dev_change_name(dev, ifr->ifr_newname);
4364
4365 /*
4366 * Unknown or private ioctl
4367 */
4368
4369 default:
4370 if ((cmd >= SIOCDEVPRIVATE &&
4371 cmd <= SIOCDEVPRIVATE + 15) ||
4372 cmd == SIOCBONDENSLAVE ||
4373 cmd == SIOCBONDRELEASE ||
4374 cmd == SIOCBONDSETHWADDR ||
4375 cmd == SIOCBONDSLAVEINFOQUERY ||
4376 cmd == SIOCBONDINFOQUERY ||
4377 cmd == SIOCBONDCHANGEACTIVE ||
4378 cmd == SIOCGMIIPHY ||
4379 cmd == SIOCGMIIREG ||
4380 cmd == SIOCSMIIREG ||
4381 cmd == SIOCBRADDIF ||
4382 cmd == SIOCBRDELIF ||
Patrick Ohlyd24fff22009-02-12 05:03:40 +00004383 cmd == SIOCSHWTSTAMP ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384 cmd == SIOCWANDEV) {
4385 err = -EOPNOTSUPP;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004386 if (ops->ndo_do_ioctl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387 if (netif_device_present(dev))
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004388 err = ops->ndo_do_ioctl(dev, ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004389 else
4390 err = -ENODEV;
4391 }
4392 } else
4393 err = -EINVAL;
4394
4395 }
4396 return err;
4397}
4398
4399/*
4400 * This function handles all "interface"-type I/O control requests. The actual
4401 * 'doing' part of this is dev_ifsioc above.
4402 */
4403
4404/**
4405 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004406 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407 * @cmd: command to issue
4408 * @arg: pointer to a struct ifreq in user space
4409 *
4410 * Issue ioctl functions to devices. This is normally called by the
4411 * user space syscall interfaces but can sometimes be useful for
4412 * other purposes. The return value is the return from the syscall if
4413 * positive, or a negative errno code on error.
4414 */
4415
Eric W. Biederman881d9662007-09-17 11:56:21 -07004416int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004417{
4418 struct ifreq ifr;
4419 int ret;
4420 char *colon;
4421
4422 /* One special case: SIOCGIFCONF takes ifconf argument
4423 and requires shared lock, because it sleeps writing
4424 to user space.
4425 */
4426
4427 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004428 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004429 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004430 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431 return ret;
4432 }
4433 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004434 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435
4436 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4437 return -EFAULT;
4438
4439 ifr.ifr_name[IFNAMSIZ-1] = 0;
4440
4441 colon = strchr(ifr.ifr_name, ':');
4442 if (colon)
4443 *colon = 0;
4444
4445 /*
4446 * See which interface the caller is talking about.
4447 */
4448
4449 switch (cmd) {
4450 /*
4451 * These ioctl calls:
4452 * - can be done by all.
4453 * - are atomic and do not require locking.
4454 * - return a value
4455 */
4456 case SIOCGIFFLAGS:
4457 case SIOCGIFMETRIC:
4458 case SIOCGIFMTU:
4459 case SIOCGIFHWADDR:
4460 case SIOCGIFSLAVE:
4461 case SIOCGIFMAP:
4462 case SIOCGIFINDEX:
4463 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004464 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465 read_lock(&dev_base_lock);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004466 ret = dev_ifsioc_locked(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467 read_unlock(&dev_base_lock);
4468 if (!ret) {
4469 if (colon)
4470 *colon = ':';
4471 if (copy_to_user(arg, &ifr,
4472 sizeof(struct ifreq)))
4473 ret = -EFAULT;
4474 }
4475 return ret;
4476
4477 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004478 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004480 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004481 rtnl_unlock();
4482 if (!ret) {
4483 if (colon)
4484 *colon = ':';
4485 if (copy_to_user(arg, &ifr,
4486 sizeof(struct ifreq)))
4487 ret = -EFAULT;
4488 }
4489 return ret;
4490
4491 /*
4492 * These ioctl calls:
4493 * - require superuser power.
4494 * - require strict serialization.
4495 * - return a value
4496 */
4497 case SIOCGMIIPHY:
4498 case SIOCGMIIREG:
4499 case SIOCSIFNAME:
4500 if (!capable(CAP_NET_ADMIN))
4501 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004502 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004503 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004504 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505 rtnl_unlock();
4506 if (!ret) {
4507 if (colon)
4508 *colon = ':';
4509 if (copy_to_user(arg, &ifr,
4510 sizeof(struct ifreq)))
4511 ret = -EFAULT;
4512 }
4513 return ret;
4514
4515 /*
4516 * These ioctl calls:
4517 * - require superuser power.
4518 * - require strict serialization.
4519 * - do not return a value
4520 */
4521 case SIOCSIFFLAGS:
4522 case SIOCSIFMETRIC:
4523 case SIOCSIFMTU:
4524 case SIOCSIFMAP:
4525 case SIOCSIFHWADDR:
4526 case SIOCSIFSLAVE:
4527 case SIOCADDMULTI:
4528 case SIOCDELMULTI:
4529 case SIOCSIFHWBROADCAST:
4530 case SIOCSIFTXQLEN:
4531 case SIOCSMIIREG:
4532 case SIOCBONDENSLAVE:
4533 case SIOCBONDRELEASE:
4534 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004535 case SIOCBONDCHANGEACTIVE:
4536 case SIOCBRADDIF:
4537 case SIOCBRDELIF:
Patrick Ohlyd24fff22009-02-12 05:03:40 +00004538 case SIOCSHWTSTAMP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004539 if (!capable(CAP_NET_ADMIN))
4540 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08004541 /* fall through */
4542 case SIOCBONDSLAVEINFOQUERY:
4543 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004544 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004545 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004546 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004547 rtnl_unlock();
4548 return ret;
4549
4550 case SIOCGIFMEM:
4551 /* Get the per device memory space. We can add this but
4552 * currently do not support it */
4553 case SIOCSIFMEM:
4554 /* Set the per device memory buffer space.
4555 * Not applicable in our case */
4556 case SIOCSIFLINK:
4557 return -EINVAL;
4558
4559 /*
4560 * Unknown or private ioctl.
4561 */
4562 default:
4563 if (cmd == SIOCWANDEV ||
4564 (cmd >= SIOCDEVPRIVATE &&
4565 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004566 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004567 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004568 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569 rtnl_unlock();
4570 if (!ret && copy_to_user(arg, &ifr,
4571 sizeof(struct ifreq)))
4572 ret = -EFAULT;
4573 return ret;
4574 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004575 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07004576 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004577 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578 return -EINVAL;
4579 }
4580}
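
/*
 * Illustrative sketch, not part of this file: the user-space view of
 * the SIOCGIFMTU branch handled above. Any AF_INET datagram socket
 * serves as a handle; "eth0" is an assumed interface name.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return 1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *			printf("mtu %d\n", ifr.ifr_mtu);
 *		close(fd);
 *		return 0;
 *	}
 */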
4581
4582
4583/**
4584 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004585 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586 *
4587 * Returns a suitable unique value for a new device interface
4588 * number. The caller must hold the rtnl semaphore or the
4589 * dev_base_lock to be sure it remains unique.
4590 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004591static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004592{
4593 static int ifindex;
4594 for (;;) {
4595 if (++ifindex <= 0)
4596 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004597 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004598 return ifindex;
4599 }
4600}
4601
Linus Torvalds1da177e2005-04-16 15:20:36 -07004602/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004603static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004604
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004605static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004607 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004608}
4609
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004610static void rollback_registered(struct net_device *dev)
4611{
4612 BUG_ON(dev_boot_phase);
4613 ASSERT_RTNL();
4614
4615	/* Some devices call this without having registered, to unwind a failed initialization. */
4616 if (dev->reg_state == NETREG_UNINITIALIZED) {
4617 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4618 "was registered\n", dev->name, dev);
4619
4620 WARN_ON(1);
4621 return;
4622 }
4623
4624 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4625
4626 /* If device is running, close it first. */
4627 dev_close(dev);
4628
4629 /* And unlink it from device chain. */
4630 unlist_netdevice(dev);
4631
4632 dev->reg_state = NETREG_UNREGISTERING;
4633
4634 synchronize_net();
4635
4636 /* Shutdown queueing discipline. */
4637 dev_shutdown(dev);
4638
4639
4640	/* Notify protocols that we are about to destroy
4641	   this device. They should clean up all their state.
4642	 */
4643 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4644
4645 /*
4646 * Flush the unicast and multicast chains
4647 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00004648 dev_unicast_flush(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004649 dev_addr_discard(dev);
4650
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004651 if (dev->netdev_ops->ndo_uninit)
4652 dev->netdev_ops->ndo_uninit(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004653
4654 /* Notifier chain MUST detach us from master device. */
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004655 WARN_ON(dev->master);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004656
4657 /* Remove entries from kobject tree */
4658 netdev_unregister_kobject(dev);
4659
4660 synchronize_net();
4661
4662 dev_put(dev);
4663}
4664
David S. Millere8a04642008-07-17 00:34:19 -07004665static void __netdev_init_queue_locks_one(struct net_device *dev,
4666 struct netdev_queue *dev_queue,
4667 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004668{
4669 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004670 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004671 dev_queue->xmit_lock_owner = -1;
4672}
4673
4674static void netdev_init_queue_locks(struct net_device *dev)
4675{
David S. Millere8a04642008-07-17 00:34:19 -07004676 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4677 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004678}
4679
Herbert Xub63365a2008-10-23 01:11:29 -07004680unsigned long netdev_fix_features(unsigned long features, const char *name)
4681{
4682 /* Fix illegal SG+CSUM combinations. */
4683 if ((features & NETIF_F_SG) &&
4684 !(features & NETIF_F_ALL_CSUM)) {
4685 if (name)
4686 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4687 "checksum feature.\n", name);
4688 features &= ~NETIF_F_SG;
4689 }
4690
4691 /* TSO requires that SG is present as well. */
4692 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4693 if (name)
4694 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4695 "SG feature.\n", name);
4696 features &= ~NETIF_F_TSO;
4697 }
4698
4699 if (features & NETIF_F_UFO) {
4700 if (!(features & NETIF_F_GEN_CSUM)) {
4701 if (name)
4702 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4703 "since no NETIF_F_HW_CSUM feature.\n",
4704 name);
4705 features &= ~NETIF_F_UFO;
4706 }
4707
4708 if (!(features & NETIF_F_SG)) {
4709 if (name)
4710 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4711 "since no NETIF_F_SG feature.\n", name);
4712 features &= ~NETIF_F_UFO;
4713 }
4714 }
4715
4716 return features;
4717}
4718EXPORT_SYMBOL(netdev_fix_features);
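
/*
 * Illustrative sketch, not part of this file: a hypothetical driver
 * sanitizing its advertised features before registration, so that a
 * configuration without scatter-gather cannot end up claiming TSO:
 *
 *	dev->features = NETIF_F_IP_CSUM | NETIF_F_TSO;	-- no NETIF_F_SG
 *	dev->features = netdev_fix_features(dev->features, dev->name);
 *
 * The second line logs "Dropping NETIF_F_TSO since no SG feature." and
 * returns the set with NETIF_F_TSO cleared.
 */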
4719
Linus Torvalds1da177e2005-04-16 15:20:36 -07004720/**
4721 * register_netdevice - register a network device
4722 * @dev: device to register
4723 *
4724 * Take a completed network device structure and add it to the kernel
4725 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4726 * chain. 0 is returned on success. A negative errno code is returned
4727 * on a failure to set up the device, or if the name is a duplicate.
4728 *
4729 * Callers must hold the rtnl semaphore. You may want
4730 * register_netdev() instead of this.
4731 *
4732 * BUGS:
4733 * The locking appears insufficient to guarantee two parallel registers
4734 * will not get the same name.
4735 */
4736
4737int register_netdevice(struct net_device *dev)
4738{
4739 struct hlist_head *head;
4740 struct hlist_node *p;
4741 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004742 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743
4744 BUG_ON(dev_boot_phase);
4745 ASSERT_RTNL();
4746
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004747 might_sleep();
4748
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749 /* When net_device's are persistent, this will be fatal. */
4750 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004751 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004752
David S. Millerf1f28aa2008-07-15 00:08:33 -07004753 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004754 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004755 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004756
Linus Torvalds1da177e2005-04-16 15:20:36 -07004757 dev->iflink = -1;
4758
4759 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004760 if (dev->netdev_ops->ndo_init) {
4761 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004762 if (ret) {
4763 if (ret > 0)
4764 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004765 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004766 }
4767 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004768
Linus Torvalds1da177e2005-04-16 15:20:36 -07004769 if (!dev_valid_name(dev->name)) {
4770 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004771 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004772 }
4773
Eric W. Biederman881d9662007-09-17 11:56:21 -07004774 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775 if (dev->iflink == -1)
4776 dev->iflink = dev->ifindex;
4777
4778 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004779 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004780 hlist_for_each(p, head) {
4781 struct net_device *d
4782 = hlist_entry(p, struct net_device, name_hlist);
4783 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4784 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004785 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004786 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004787 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004788
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004789 /* Fix illegal checksum combinations */
4790 if ((dev->features & NETIF_F_HW_CSUM) &&
4791 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4792 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4793 dev->name);
4794 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4795 }
4796
4797 if ((dev->features & NETIF_F_NO_CSUM) &&
4798 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4799 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4800 dev->name);
4801 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4802 }
4803
Herbert Xub63365a2008-10-23 01:11:29 -07004804 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004805
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004806 /* Enable software GSO if SG is supported. */
4807 if (dev->features & NETIF_F_SG)
4808 dev->features |= NETIF_F_GSO;
4809
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004810 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004811 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004812 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004813 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004814 dev->reg_state = NETREG_REGISTERED;
4815
Linus Torvalds1da177e2005-04-16 15:20:36 -07004816 /*
4817 * Default initial state at registry is that the
4818 * device is present.
4819 */
4820
4821 set_bit(__LINK_STATE_PRESENT, &dev->state);
4822
Linus Torvalds1da177e2005-04-16 15:20:36 -07004823 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004824 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004825 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004826
4827 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004828 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004829 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004830 if (ret) {
4831 rollback_registered(dev);
4832 dev->reg_state = NETREG_UNREGISTERED;
4833 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834
4835out:
4836 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004837
4838err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004839 if (dev->netdev_ops->ndo_uninit)
4840 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004841 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004842}
4843
4844/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004845 * init_dummy_netdev - init a dummy network device for NAPI
4846 * @dev: device to init
4847 *
4848 * This takes a network device structure and initializes the minimum
4849 * set of fields so it can be used to schedule NAPI polls without
4850 * registering a full blown interface. This is to be used by drivers
4851 * that need to tie several hardware interfaces to a single NAPI
4852 * poll scheduler due to HW limitations.
4853 */
4854int init_dummy_netdev(struct net_device *dev)
4855{
4856 /* Clear everything. Note we don't initialize spinlocks
4857	 * as they aren't supposed to be taken by any of the
4858 * NAPI code and this dummy netdev is supposed to be
4859 * only ever used for NAPI polls
4860 */
4861 memset(dev, 0, sizeof(struct net_device));
4862
4863 /* make sure we BUG if trying to hit standard
4864 * register/unregister code path
4865 */
4866 dev->reg_state = NETREG_DUMMY;
4867
4868 /* initialize the ref count */
4869 atomic_set(&dev->refcnt, 1);
4870
4871 /* NAPI wants this */
4872 INIT_LIST_HEAD(&dev->napi_list);
4873
4874 /* a dummy interface is started by default */
4875 set_bit(__LINK_STATE_PRESENT, &dev->state);
4876 set_bit(__LINK_STATE_START, &dev->state);
4877
4878 return 0;
4879}
4880EXPORT_SYMBOL_GPL(init_dummy_netdev);
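
/*
 * Illustrative sketch, not part of this file: the usage pattern this
 * helper exists for. The structure and function names are invented; a
 * driver embeds a dummy netdev purely so that several hardware channels
 * can share the NAPI machinery.
 *
 *	struct my_hw {
 *		struct net_device napi_dev;	-- never registered
 *		struct napi_struct napi;
 *	};
 *
 *	static void my_setup_napi(struct my_hw *hw)
 *	{
 *		init_dummy_netdev(&hw->napi_dev);
 *		netif_napi_add(&hw->napi_dev, &hw->napi, my_poll, 64);
 *		napi_enable(&hw->napi);
 *	}
 */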
4881
4882
4883/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884 * register_netdev - register a network device
4885 * @dev: device to register
4886 *
4887 * Take a completed network device structure and add it to the kernel
4888 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4889 * chain. 0 is returned on success. A negative errno code is returned
4890 * on a failure to set up the device, or if the name is a duplicate.
4891 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004892 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004893 * and expands the device name if you passed a format string to
4894 * alloc_netdev.
4895 */
4896int register_netdev(struct net_device *dev)
4897{
4898 int err;
4899
4900 rtnl_lock();
4901
4902 /*
4903 * If the name is a format string the caller wants us to do a
4904 * name allocation.
4905 */
4906 if (strchr(dev->name, '%')) {
4907 err = dev_alloc_name(dev, dev->name);
4908 if (err < 0)
4909 goto out;
4910 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004911
Linus Torvalds1da177e2005-04-16 15:20:36 -07004912 err = register_netdevice(dev);
4913out:
4914 rtnl_unlock();
4915 return err;
4916}
4917EXPORT_SYMBOL(register_netdev);
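
/*
 * Illustrative sketch, not part of this file: the classic driver life
 * cycle around register_netdev(). my_priv and my_netdev_ops are
 * invented names, and error handling is trimmed to the essentials.
 *
 *	static int my_probe(void)
 *	{
 *		struct net_device *dev;
 *		int err;
 *
 *		dev = alloc_etherdev(sizeof(struct my_priv));
 *		if (!dev)
 *			return -ENOMEM;
 *		dev->netdev_ops = &my_netdev_ops;
 *		err = register_netdev(dev);	-- takes rtnl itself
 *		if (err)
 *			free_netdev(dev);
 *		return err;
 *	}
 *
 *	static void my_remove(struct net_device *dev)
 *	{
 *		unregister_netdev(dev);
 *		free_netdev(dev);
 *	}
 */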
4918
4919/*
4920 * netdev_wait_allrefs - wait until all references are gone.
4921 *
4922 * This is called when unregistering network devices.
4923 *
4924 * Any protocol or device that holds a reference should register
4925 * for netdevice notification, and clean up and put back the
4926 * reference if they receive an UNREGISTER event.
4927 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004928 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004929 */
4930static void netdev_wait_allrefs(struct net_device *dev)
4931{
4932 unsigned long rebroadcast_time, warning_time;
4933
4934 rebroadcast_time = warning_time = jiffies;
4935 while (atomic_read(&dev->refcnt) != 0) {
4936 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004937 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938
4939 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004940 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004941
4942 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4943 &dev->state)) {
4944 /* We must not have linkwatch events
4945 * pending on unregister. If this
4946 * happens, we simply run the queue
4947 * unscheduled, resulting in a noop
4948 * for this device.
4949 */
4950 linkwatch_run_queue();
4951 }
4952
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004953 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004954
4955 rebroadcast_time = jiffies;
4956 }
4957
4958 msleep(250);
4959
4960 if (time_after(jiffies, warning_time + 10 * HZ)) {
4961 printk(KERN_EMERG "unregister_netdevice: "
4962 "waiting for %s to become free. Usage "
4963 "count = %d\n",
4964 dev->name, atomic_read(&dev->refcnt));
4965 warning_time = jiffies;
4966 }
4967 }
4968}
4969
4970/* The sequence is:
4971 *
4972 * rtnl_lock();
4973 * ...
4974 * register_netdevice(x1);
4975 * register_netdevice(x2);
4976 * ...
4977 * unregister_netdevice(y1);
4978 * unregister_netdevice(y2);
4979 * ...
4980 * rtnl_unlock();
4981 * free_netdev(y1);
4982 * free_netdev(y2);
4983 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07004984 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07004985 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004986 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987 * without deadlocking with linkwatch via keventd.
4988 * 2) Since we run with the RTNL semaphore not held, we can sleep
4989 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07004990 *
4991 * We must not return until all unregister events added during
4992 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004994void netdev_run_todo(void)
4995{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004996 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004997
Linus Torvalds1da177e2005-04-16 15:20:36 -07004998 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004999 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005000
5001 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005002
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003 while (!list_empty(&list)) {
5004 struct net_device *dev
5005 = list_entry(list.next, struct net_device, todo_list);
5006 list_del(&dev->todo_list);
5007
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005008 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009 printk(KERN_ERR "network todo '%s' but state %d\n",
5010 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005011 dump_stack();
5012 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005013 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005014
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005015 dev->reg_state = NETREG_UNREGISTERED;
5016
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005017 on_each_cpu(flush_backlog, dev, 1);
5018
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005019 netdev_wait_allrefs(dev);
5020
5021 /* paranoia */
5022 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005023 WARN_ON(dev->ip_ptr);
5024 WARN_ON(dev->ip6_ptr);
5025 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005026
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005027 if (dev->destructor)
5028 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005029
5030 /* Free network device */
5031 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033}
5034
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005035/**
5036 * dev_get_stats - get network device statistics
5037 * @dev: device to get statistics from
5038 *
5039 * Get network statistics from device. The device driver may provide
5040 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5041 * the internal statistics structure is used.
5042 */
5043const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005044{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005045 const struct net_device_ops *ops = dev->netdev_ops;
5046
5047 if (ops->ndo_get_stats)
5048 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005049 else {
5050 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5051 struct net_device_stats *stats = &dev->stats;
5052 unsigned int i;
5053 struct netdev_queue *txq;
5054
5055 for (i = 0; i < dev->num_tx_queues; i++) {
5056 txq = netdev_get_tx_queue(dev, i);
5057 tx_bytes += txq->tx_bytes;
5058 tx_packets += txq->tx_packets;
5059 tx_dropped += txq->tx_dropped;
5060 }
5061 if (tx_bytes || tx_packets || tx_dropped) {
5062 stats->tx_bytes = tx_bytes;
5063 stats->tx_packets = tx_packets;
5064 stats->tx_dropped = tx_dropped;
5065 }
5066 return stats;
5067 }
Rusty Russellc45d2862007-03-28 14:29:08 -07005068}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005069EXPORT_SYMBOL(dev_get_stats);
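
/*
 * Illustrative sketch, not part of this file: a hypothetical driver
 * supplying its own ndo_get_stats so that dev_get_stats() above returns
 * hardware counters instead of the default dev->stats/tx-queue sums.
 * my_priv and my_read_hw_counter() are invented names.
 *
 *	static struct net_device_stats *my_get_stats(struct net_device *dev)
 *	{
 *		struct my_priv *p = netdev_priv(dev);
 *
 *		dev->stats.rx_packets = my_read_hw_counter(p, MY_RX_PKTS);
 *		dev->stats.tx_packets = my_read_hw_counter(p, MY_TX_PKTS);
 *		return &dev->stats;
 *	}
 *
 *	static const struct net_device_ops my_netdev_ops = {
 *		.ndo_get_stats	= my_get_stats,
 *	};
 */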
Rusty Russellc45d2862007-03-28 14:29:08 -07005070
David S. Millerdc2b4842008-07-08 17:18:23 -07005071static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005072 struct netdev_queue *queue,
5073 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005074{
David S. Millerdc2b4842008-07-08 17:18:23 -07005075 queue->dev = dev;
5076}
5077
David S. Millerbb949fb2008-07-08 16:55:56 -07005078static void netdev_init_queues(struct net_device *dev)
5079{
David S. Millere8a04642008-07-17 00:34:19 -07005080 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5081 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005082 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005083}
5084
Linus Torvalds1da177e2005-04-16 15:20:36 -07005085/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005086 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005087 * @sizeof_priv: size of private data to allocate space for
5088 * @name: device name format string
5089 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005090 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091 *
5092 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005093 * and performs basic initialization. Also allocates subqueue structs
5094 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005096struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5097 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005098{
David S. Millere8a04642008-07-17 00:34:19 -07005099 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005101 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005102 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005103
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005104 BUG_ON(strlen(name) >= sizeof(dev->name));
5105
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005106 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005107 if (sizeof_priv) {
5108 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005109 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005110 alloc_size += sizeof_priv;
5111 }
5112 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005113 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005115 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005116 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005117 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005118 return NULL;
5119 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120
Stephen Hemminger79439862008-07-21 13:28:44 -07005121 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005122 if (!tx) {
5123 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5124 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005125 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005126 }
5127
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005128 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005129 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005130
5131 if (dev_addr_init(dev))
5132 goto free_tx;
5133
Jiri Pirkoccffad252009-05-22 23:22:17 +00005134 dev_unicast_init(dev);
5135
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005136 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005137
David S. Millere8a04642008-07-17 00:34:19 -07005138 dev->_tx = tx;
5139 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005140 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005141
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005142 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005143
David S. Millerbb949fb2008-07-08 16:55:56 -07005144 netdev_init_queues(dev);
5145
Herbert Xud565b0a2008-12-15 23:38:52 -08005146 INIT_LIST_HEAD(&dev->napi_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005147 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148 setup(dev);
5149 strcpy(dev->name, name);
5150 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005151
5152free_tx:
5153 kfree(tx);
5154
5155free_p:
5156 kfree(p);
5157 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005158}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005159EXPORT_SYMBOL(alloc_netdev_mq);
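
/*
 * Illustrative sketch, not part of this file: allocating a four-queue
 * Ethernet device. ether_setup() fills in the Ethernet defaults, and
 * the single-queue alloc_netdev() macro is this call with a
 * queue_count of 1. struct my_priv is an invented name.
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d",
 *			      ether_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */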
Linus Torvalds1da177e2005-04-16 15:20:36 -07005160
5161/**
5162 * free_netdev - free network device
5163 * @dev: device
5164 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005165 * This function does the last stage of destroying an allocated device
5166 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167 * If this is the last reference then it will be freed.
5168 */
5169void free_netdev(struct net_device *dev)
5170{
Herbert Xud565b0a2008-12-15 23:38:52 -08005171 struct napi_struct *p, *n;
5172
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005173 release_net(dev_net(dev));
5174
David S. Millere8a04642008-07-17 00:34:19 -07005175 kfree(dev->_tx);
5176
Jiri Pirkof001fde2009-05-05 02:48:28 +00005177 /* Flush device addresses */
5178 dev_addr_flush(dev);
5179
Herbert Xud565b0a2008-12-15 23:38:52 -08005180 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5181 netif_napi_del(p);
5182
Stephen Hemminger3041a062006-05-26 13:25:24 -07005183 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184 if (dev->reg_state == NETREG_UNINITIALIZED) {
5185 kfree((char *)dev - dev->padded);
5186 return;
5187 }
5188
5189 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5190 dev->reg_state = NETREG_RELEASED;
5191
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005192 /* will free via device release */
5193 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005194}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005195
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005196/**
5197 * synchronize_net - Synchronize with packet receive processing
5198 *
5199 * Wait for packets currently being received to be done.
5200 * Does not block later packets from starting.
5201 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005202void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203{
5204 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005205 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206}
5207
5208/**
5209 * unregister_netdevice - remove device from the kernel
5210 * @dev: device
5211 *
5212 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005213 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214 *
5215 * Callers must hold the rtnl semaphore. You may want
5216 * unregister_netdev() instead of this.
5217 */
5218
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08005219void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005220{
Herbert Xua6620712007-12-12 19:21:56 -08005221 ASSERT_RTNL();
5222
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005223 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005224 /* Finish processing unregister after unlock */
5225 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226}
5227
5228/**
5229 * unregister_netdev - remove device from the kernel
5230 * @dev: device
5231 *
5232 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005233 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234 *
5235 * This is just a wrapper for unregister_netdevice that takes
5236 * the rtnl semaphore. In general you want to use this and not
5237 * unregister_netdevice.
5238 */
5239void unregister_netdev(struct net_device *dev)
5240{
5241 rtnl_lock();
5242 unregister_netdevice(dev);
5243 rtnl_unlock();
5244}
5245
5246EXPORT_SYMBOL(unregister_netdev);
5247
Eric W. Biedermance286d32007-09-12 13:53:49 +02005248/**
5249 * dev_change_net_namespace - move device to a different network namespace
5250 * @dev: device
5251 * @net: network namespace
5252 * @pat: If not NULL name pattern to try if the current device name
5253 * is already taken in the destination network namespace.
5254 *
5255 * This function shuts down a device interface and moves it
5256 * to a new network namespace. On success 0 is returned, on
5257 * a failure a negative errno code is returned.
5258 *
5259 * Callers must hold the rtnl semaphore.
5260 */
5261
5262int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5263{
5264 char buf[IFNAMSIZ];
5265 const char *destname;
5266 int err;
5267
5268 ASSERT_RTNL();
5269
5270 /* Don't allow namespace local devices to be moved. */
5271 err = -EINVAL;
5272 if (dev->features & NETIF_F_NETNS_LOCAL)
5273 goto out;
5274
Eric W. Biederman38918452008-10-27 17:51:47 -07005275#ifdef CONFIG_SYSFS
5276 /* Don't allow real devices to be moved when sysfs
5277 * is enabled.
5278 */
5279 err = -EINVAL;
5280 if (dev->dev.parent)
5281 goto out;
5282#endif
5283
Eric W. Biedermance286d32007-09-12 13:53:49 +02005284 /* Ensure the device has been registered */
5285 err = -EINVAL;
5286 if (dev->reg_state != NETREG_REGISTERED)
5287 goto out;
5288
5289 /* Get out if there is nothing to do */
5290 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005291 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005292 goto out;
5293
5294 /* Pick the destination device name, and ensure
5295 * we can use it in the destination network namespace.
5296 */
5297 err = -EEXIST;
5298 destname = dev->name;
5299 if (__dev_get_by_name(net, destname)) {
5300 /* We get here if we can't use the current device name */
5301 if (!pat)
5302 goto out;
5303 if (!dev_valid_name(pat))
5304 goto out;
5305 if (strchr(pat, '%')) {
5306 if (__dev_alloc_name(net, pat, buf) < 0)
5307 goto out;
5308 destname = buf;
5309 } else
5310 destname = pat;
5311 if (__dev_get_by_name(net, destname))
5312 goto out;
5313 }
5314
5315 /*
5316	 * And now a mini version of register_netdevice and unregister_netdevice.
5317 */
5318
5319 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005320 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005321
5322 /* And unlink it from device chain */
5323 err = -ENODEV;
5324 unlist_netdevice(dev);
5325
5326 synchronize_net();
5327
5328 /* Shutdown queueing discipline. */
5329 dev_shutdown(dev);
5330
5331	/* Notify protocols that we are about to destroy
5332	   this device. They should clean up all their state.
5333	 */
5334 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5335
5336 /*
5337 * Flush the unicast and multicast chains
5338 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00005339 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005340 dev_addr_discard(dev);
5341
Eric W. Biederman38918452008-10-27 17:51:47 -07005342 netdev_unregister_kobject(dev);
5343
Eric W. Biedermance286d32007-09-12 13:53:49 +02005344 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005345 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005346
5347 /* Assign the new device name */
5348 if (destname != dev->name)
5349 strcpy(dev->name, destname);
5350
5351 /* If there is an ifindex conflict assign a new one */
5352 if (__dev_get_by_index(net, dev->ifindex)) {
5353 int iflink = (dev->iflink == dev->ifindex);
5354 dev->ifindex = dev_new_index(net);
5355 if (iflink)
5356 dev->iflink = dev->ifindex;
5357 }
5358
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005359 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005360 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005361 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005362
5363 /* Add the device back in the hashes */
5364 list_netdevice(dev);
5365
5366 /* Notify protocols, that a new device appeared. */
5367 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5368
5369 synchronize_net();
5370 err = 0;
5371out:
5372 return err;
5373}
Johannes Berg463d0182009-07-14 00:33:35 +02005374EXPORT_SYMBOL_GPL(dev_change_net_namespace);
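
/*
 * Illustrative sketch, not part of this file: moving a device into
 * another namespace, much as rtnetlink does for IFLA_NET_NS_PID
 * requests. "eth%d" is a fallback pattern for the case where the
 * current name is taken in the target namespace; RTNL must be held.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	rtnl_unlock();
 */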
Eric W. Biedermance286d32007-09-12 13:53:49 +02005375
Linus Torvalds1da177e2005-04-16 15:20:36 -07005376static int dev_cpu_callback(struct notifier_block *nfb,
5377 unsigned long action,
5378 void *ocpu)
5379{
5380 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005381 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005382 struct sk_buff *skb;
5383 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5384 struct softnet_data *sd, *oldsd;
5385
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005386 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005387 return NOTIFY_OK;
5388
5389 local_irq_disable();
5390 cpu = smp_processor_id();
5391 sd = &per_cpu(softnet_data, cpu);
5392 oldsd = &per_cpu(softnet_data, oldcpu);
5393
5394 /* Find end of our completion_queue. */
5395 list_skb = &sd->completion_queue;
5396 while (*list_skb)
5397 list_skb = &(*list_skb)->next;
5398 /* Append completion queue from offline CPU. */
5399 *list_skb = oldsd->completion_queue;
5400 oldsd->completion_queue = NULL;
5401
5402 /* Find end of our output_queue. */
5403 list_net = &sd->output_queue;
5404 while (*list_net)
5405 list_net = &(*list_net)->next_sched;
5406 /* Append output queue from offline CPU. */
5407 *list_net = oldsd->output_queue;
5408 oldsd->output_queue = NULL;
5409
5410 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5411 local_irq_enable();
5412
5413 /* Process offline CPU's input_pkt_queue */
5414 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5415 netif_rx(skb);
5416
5417 return NOTIFY_OK;
5418}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419
5420
Herbert Xu7f353bf2007-08-10 15:47:58 -07005421/**
Herbert Xub63365a2008-10-23 01:11:29 -07005422 * netdev_increment_features - increment feature set by one
5423 * @all: current feature set
5424 * @one: new feature set
5425 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005426 *
5427 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005428 * @one to the master device with current feature set @all. Will not
5429 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005430 */
Herbert Xub63365a2008-10-23 01:11:29 -07005431unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5432 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005433{
Herbert Xub63365a2008-10-23 01:11:29 -07005434 /* If device needs checksumming, downgrade to it. */
5435 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5436 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5437 else if (mask & NETIF_F_ALL_CSUM) {
5438 /* If one device supports v4/v6 checksumming, set for all. */
5439 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5440 !(all & NETIF_F_GEN_CSUM)) {
5441 all &= ~NETIF_F_ALL_CSUM;
5442 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5443 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005444
Herbert Xub63365a2008-10-23 01:11:29 -07005445 /* If one device supports hw checksumming, set for all. */
5446 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5447 all &= ~NETIF_F_ALL_CSUM;
5448 all |= NETIF_F_HW_CSUM;
5449 }
5450 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005451
Herbert Xub63365a2008-10-23 01:11:29 -07005452 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005453
Herbert Xub63365a2008-10-23 01:11:29 -07005454 one |= all & NETIF_F_ONE_FOR_ALL;
5455 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
5456 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005457
5458 return all;
5459}
Herbert Xub63365a2008-10-23 01:11:29 -07005460EXPORT_SYMBOL(netdev_increment_features);
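
/*
 * Illustrative sketch, not part of this file: how a master driver in
 * the bonding style might fold each slave's feature set into its own,
 * then sanitize the result. The slave list iteration is schematic, and
 * "master" is an assumed net_device for the aggregate.
 *
 *	unsigned long features = master->features;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     NETIF_F_ONE_FOR_ALL);
 *	master->features = netdev_fix_features(features, master->name);
 */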
Herbert Xu7f353bf2007-08-10 15:47:58 -07005461
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005462static struct hlist_head *netdev_create_hash(void)
5463{
5464 int i;
5465 struct hlist_head *hash;
5466
5467 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5468 if (hash != NULL)
5469 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5470 INIT_HLIST_HEAD(&hash[i]);
5471
5472 return hash;
5473}
5474
Eric W. Biederman881d9662007-09-17 11:56:21 -07005475/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005476static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005477{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005478 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005479
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005480 net->dev_name_head = netdev_create_hash();
5481 if (net->dev_name_head == NULL)
5482 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005483
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005484 net->dev_index_head = netdev_create_hash();
5485 if (net->dev_index_head == NULL)
5486 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005487
5488 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005489
5490err_idx:
5491 kfree(net->dev_name_head);
5492err_name:
5493 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005494}
5495
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005496/**
5497 * netdev_drivername - network driver for the device
5498 * @dev: network device
5499 * @buffer: buffer for resulting name
5500 * @len: size of buffer
5501 *
5502 * Determine network driver for device.
5503 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005504char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005505{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005506 const struct device_driver *driver;
5507 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005508
5509 if (len <= 0 || !buffer)
5510 return buffer;
5511 buffer[0] = 0;
5512
5513 parent = dev->dev.parent;
5514
5515 if (!parent)
5516 return buffer;
5517
5518 driver = parent->driver;
5519 if (driver && driver->name)
5520 strlcpy(buffer, driver->name, len);
5521 return buffer;
5522}
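
/*
 * Illustrative sketch, not part of this file: the watchdog-style log
 * message this helper is aimed at, naming the responsible driver.
 *
 *	char drivername[64];
 *
 *	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */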
5523
Pavel Emelyanov46650792007-10-08 20:38:39 -07005524static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005525{
5526 kfree(net->dev_name_head);
5527 kfree(net->dev_index_head);
5528}
5529
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005530static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005531 .init = netdev_init,
5532 .exit = netdev_exit,
5533};
5534
Pavel Emelyanov46650792007-10-08 20:38:39 -07005535static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005536{
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005537 struct net_device *dev;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005538 /*
5539	 * Push all migratable network devices back to the
5540 * initial network namespace
5541 */
5542 rtnl_lock();
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005543restart:
5544 for_each_netdev(net, dev) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005545 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005546 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005547
5548 /* Ignore unmoveable devices (i.e. loopback) */
5549 if (dev->features & NETIF_F_NETNS_LOCAL)
5550 continue;
5551
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005552 /* Delete virtual devices */
5553 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
5554 dev->rtnl_link_ops->dellink(dev);
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005555 goto restart;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005556 }
5557
Eric W. Biedermance286d32007-09-12 13:53:49 +02005558 /* Push remaining network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005559 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5560 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005561 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005562 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005563 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005564 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005565 }
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005566 goto restart;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005567 }
5568 rtnl_unlock();
5569}
5570
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005571static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005572 .exit = default_device_exit,
5573};
5574
Linus Torvalds1da177e2005-04-16 15:20:36 -07005575/*
5576 * Initialize the DEV module. At boot time this walks the device list and
5577 * unhooks any devices that fail to initialise (normally hardware not
5578 * present) and leaves us with a valid list of present and active devices.
5579 *
5580 */
5581
5582/*
5583 * This is called single threaded during boot, so no need
5584 * to take the rtnl semaphore.
5585 */
5586static int __init net_dev_init(void)
5587{
5588 int i, rc = -ENOMEM;
5589
5590 BUG_ON(!dev_boot_phase);
5591
Linus Torvalds1da177e2005-04-16 15:20:36 -07005592 if (dev_proc_init())
5593 goto out;
5594
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005595 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005596 goto out;
5597
5598 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08005599 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005600 INIT_LIST_HEAD(&ptype_base[i]);
5601
Eric W. Biederman881d9662007-09-17 11:56:21 -07005602 if (register_pernet_subsys(&netdev_net_ops))
5603 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005604
5605 /*
5606 * Initialise the packet receive queues.
5607 */
5608
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005609 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005610 struct softnet_data *queue;
5611
5612 queue = &per_cpu(softnet_data, i);
5613 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005614 queue->completion_queue = NULL;
5615 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005616
5617 queue->backlog.poll = process_backlog;
5618 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005619 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005620 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005621 }
5622
Linus Torvalds1da177e2005-04-16 15:20:36 -07005623 dev_boot_phase = 0;
5624
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005625 /* The loopback device is special: if any other network device
5626 * is present in a network namespace, the loopback device must
5627 * be present too. Since we now dynamically allocate and free the
5628 * loopback device, ensure this invariant is maintained by
5629 * keeping the loopback device as the first device on the
5630 * list of network devices, so that it is the first device
5631 * that appears and the last network device
5632 * that disappears.
5633 */
5634 if (register_pernet_device(&loopback_net_ops))
5635 goto out;
5636
5637 if (register_pernet_device(&default_device_ops))
5638 goto out;
5639
Carlos R. Mafra962cf362008-05-15 11:15:37 -03005640 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5641 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005642
5643 hotcpu_notifier(dev_cpu_callback, 0);
5644 dst_init();
5645 dev_mcast_init();
5646 rc = 0;
5647out:
5648 return rc;
5649}
5650
5651subsys_initcall(net_dev_init);
5652
Krishna Kumare88721f2009-02-18 17:55:02 -08005653static int __init initialize_hashrnd(void)
5654{
5655 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5656 return 0;
5657}
5658
5659late_initcall_sync(initialize_hashrnd);
5660
Linus Torvalds1da177e2005-04-16 15:20:36 -07005661EXPORT_SYMBOL(__dev_get_by_index);
5662EXPORT_SYMBOL(__dev_get_by_name);
5663EXPORT_SYMBOL(__dev_remove_pack);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08005664EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005665EXPORT_SYMBOL(dev_add_pack);
5666EXPORT_SYMBOL(dev_alloc_name);
5667EXPORT_SYMBOL(dev_close);
5668EXPORT_SYMBOL(dev_get_by_flags);
5669EXPORT_SYMBOL(dev_get_by_index);
5670EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671EXPORT_SYMBOL(dev_open);
5672EXPORT_SYMBOL(dev_queue_xmit);
5673EXPORT_SYMBOL(dev_remove_pack);
5674EXPORT_SYMBOL(dev_set_allmulti);
5675EXPORT_SYMBOL(dev_set_promiscuity);
5676EXPORT_SYMBOL(dev_change_flags);
5677EXPORT_SYMBOL(dev_set_mtu);
5678EXPORT_SYMBOL(dev_set_mac_address);
5679EXPORT_SYMBOL(free_netdev);
5680EXPORT_SYMBOL(netdev_boot_setup_check);
5681EXPORT_SYMBOL(netdev_set_master);
5682EXPORT_SYMBOL(netdev_state_change);
5683EXPORT_SYMBOL(netif_receive_skb);
5684EXPORT_SYMBOL(netif_rx);
5685EXPORT_SYMBOL(register_gifconf);
5686EXPORT_SYMBOL(register_netdevice);
5687EXPORT_SYMBOL(register_netdevice_notifier);
5688EXPORT_SYMBOL(skb_checksum_help);
5689EXPORT_SYMBOL(synchronize_net);
5690EXPORT_SYMBOL(unregister_netdevice);
5691EXPORT_SYMBOL(unregister_netdevice_notifier);
5692EXPORT_SYMBOL(net_enable_timestamp);
5693EXPORT_SYMBOL(net_disable_timestamp);
5694EXPORT_SYMBOL(dev_get_flags);
5695
Linus Torvalds1da177e2005-04-16 15:20:36 -07005696EXPORT_SYMBOL(dev_load);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005697
5698EXPORT_PER_CPU_SYMBOL(softnet_data);