/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and checking protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a packet-mangling protocol handler were
 *	first in the list, it could not sense that the packet is cloned
 *	and should be copied-on-write, so it would change it and
 *	subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

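/*
 * Illustrative usage (editorial sketch, not part of this file): a module
 * that handles its own ethertype would pair dev_add_pack() with
 * dev_remove_pack().  The handler name and the ethertype value (the IEEE
 * local-experimental 0x88B5) are assumptions made for the example.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt,
 *			  struct net_device *orig_dev);
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type	= htons(0x88B5),
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);	on module init
 *	dev_remove_pack(&my_ptype);	on module exit; may sleep
 */
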
/*******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds a new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}

/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves the boot-time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

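/*
 * Example (illustrative): a boot command line of
 *
 *	netdev=9,0x300,0,0,eth1
 *
 * is parsed above as irq=9, base_addr=0x300, mem_start=0, mem_end=0;
 * get_options() consumes the leading integers and leaves "eth1" in @str
 * as the device name passed to netdev_boot_setup_add().
 */
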
/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

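/*
 * Illustrative usage (editorial sketch, not part of this file): since
 * dev_get_by_name() takes a reference, every successful lookup must be
 * balanced by dev_put().  The interface name here is an assumption.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */
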
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

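/*
 * For example, "eth0" and "bond%d" pass the check above, while "", ".",
 * "..", names containing '/' or whitespace, and names of IFNAMSIZ bytes
 * or more are all rejected.
 */
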
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

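/*
 * Illustrative usage (editorial sketch, not part of this file): a driver
 * that does not care about the unit number passes a format string, e.g.
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;
 *
 * which picks the first free slot (eth0, eth1, ...) and copies the
 * resulting name into dev->name.
 */
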
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; format strings such as "eth%d"
 *	may be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

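/*
 * Illustrative usage (editorial sketch, not part of this file): dev_open()
 * asserts the RTNL, so callers outside the core bracket it like
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */
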
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);

static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to give it a race-free view of the network
 *	device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

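/*
 * Illustrative usage (editorial sketch, not part of this file): a subsystem
 * that watches device events registers a struct notifier_block; note that
 * on this chain the notifier's third argument is the net_device itself.
 * The callback name and its logic are assumptions made for the example.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_notifier);
 */
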
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp(skb);
#else
	net_timestamp(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

Jarek Poplawskidef82a12008-08-17 21:54:43 -07001390static inline void __netif_reschedule(struct Qdisc *q)
1391{
1392 struct softnet_data *sd;
1393 unsigned long flags;
1394
1395 local_irq_save(flags);
1396 sd = &__get_cpu_var(softnet_data);
1397 q->next_sched = sd->output_queue;
1398 sd->output_queue = q;
1399 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1400 local_irq_restore(flags);
1401}
1402
David S. Miller37437bb2008-07-16 02:15:04 -07001403void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001404{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001405 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1406 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001407}
1408EXPORT_SYMBOL(__netif_schedule);
1409
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001410void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001411{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001412 if (atomic_dec_and_test(&skb->users)) {
1413 struct softnet_data *sd;
1414 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001415
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001416 local_irq_save(flags);
1417 sd = &__get_cpu_var(softnet_data);
1418 skb->next = sd->completion_queue;
1419 sd->completion_queue = skb;
1420 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1421 local_irq_restore(flags);
1422 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001423}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001424EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001425
1426void dev_kfree_skb_any(struct sk_buff *skb)
1427{
1428 if (in_irq() || irqs_disabled())
1429 dev_kfree_skb_irq(skb);
1430 else
1431 dev_kfree_skb(skb);
1432}
1433EXPORT_SYMBOL(dev_kfree_skb_any);
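
/*
 * Driver-side sketch (hypothetical): a TX-completion handler that may be
 * reached from hard irq context frees its skbs with dev_kfree_skb_any(),
 * which picks the irq-safe path above when needed.
 */
static void sample_tx_complete(struct sk_buff *skb)
{
        /* safe whether called from hard irq, softirq or process context */
        dev_kfree_skb_any(skb);
}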
1434
1435
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001436/**
1437 * netif_device_detach - mark device as removed
1438 * @dev: network device
1439 *
1440 * Mark device as removed from system and therefore no longer available.
1441 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001442void netif_device_detach(struct net_device *dev)
1443{
1444 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1445 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001446 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001447 }
1448}
1449EXPORT_SYMBOL(netif_device_detach);
1450
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001451/**
1452 * netif_device_attach - mark device as attached
1453 * @dev: network device
1454 *
1455 * Mark device as attached from system and restart if needed.
1456 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001457void netif_device_attach(struct net_device *dev)
1458{
1459 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1460 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001461 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001462 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001463 }
1464}
1465EXPORT_SYMBOL(netif_device_attach);
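
/*
 * Sketch of the intended pairing (hypothetical driver and signatures):
 * detach on suspend so the stack stops feeding the device, attach on
 * resume to restart the queues and the watchdog.
 */
static int sample_suspend(struct net_device *dev)
{
        netif_device_detach(dev);
        /* ... put the hardware to sleep ... */
        return 0;
}

static int sample_resume(struct net_device *dev)
{
        /* ... wake the hardware ... */
        netif_device_attach(dev);
        return 0;
}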
1466
Ben Hutchings6de329e2008-06-16 17:02:28 -07001467static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1468{
1469 return ((features & NETIF_F_GEN_CSUM) ||
1470 ((features & NETIF_F_IP_CSUM) &&
1471 protocol == htons(ETH_P_IP)) ||
1472 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001473 protocol == htons(ETH_P_IPV6)) ||
1474 ((features & NETIF_F_FCOE_CRC) &&
1475 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001476}
1477
1478static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1479{
1480 if (can_checksum_protocol(dev->features, skb->protocol))
1481 return true;
1482
1483 if (skb->protocol == htons(ETH_P_8021Q)) {
1484 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1485 if (can_checksum_protocol(dev->features & dev->vlan_features,
1486 veh->h_vlan_encapsulated_proto))
1487 return true;
1488 }
1489
1490 return false;
1491}
Denis Vlasenko56079432006-03-29 15:57:29 -08001492
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493/*
1494 * Invalidate hardware checksum when packet is to be mangled, and
1495 * complete checksum manually on outgoing path.
1496 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001497int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498{
Al Virod3bc23e2006-11-14 21:24:49 -08001499 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001500 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501
Patrick McHardy84fa7932006-08-29 16:44:56 -07001502 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001503 goto out_set_summed;
1504
1505 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001506 /* Let GSO fix up the checksum. */
1507 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 }
1509
Herbert Xua0308472007-10-15 01:47:15 -07001510 offset = skb->csum_start - skb_headroom(skb);
1511 BUG_ON(offset >= skb_headlen(skb));
1512 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1513
1514 offset += skb->csum_offset;
1515 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1516
1517 if (skb_cloned(skb) &&
1518 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1520 if (ret)
1521 goto out;
1522 }
1523
Herbert Xua0308472007-10-15 01:47:15 -07001524 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001525out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001527out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 return ret;
1529}
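
/*
 * Caller sketch (hypothetical): a transmit path about to hand a
 * CHECKSUM_PARTIAL skb to hardware that cannot offload this protocol
 * falls back to the software helper above, exactly as dev_queue_xmit()
 * does further down in this file.
 */
static int sample_tx_csum_fixup(struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
                return -EIO;    /* could not complete the checksum */
        return 0;
}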
1530
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001531/**
1532 * skb_gso_segment - Perform segmentation on skb.
1533 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001534 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001535 *
1536 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001537 *
1538 * It may return NULL if the skb requires no segmentation. This is
1539 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001540 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001541struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001542{
1543 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1544 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001545 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001546 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001547
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001548 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001549 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001550 __skb_pull(skb, skb->mac_len);
1551
Herbert Xu67fd1a72009-01-19 16:26:44 -08001552 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1553 struct net_device *dev = skb->dev;
1554 struct ethtool_drvinfo info = {};
1555
1556 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1557 dev->ethtool_ops->get_drvinfo(dev, &info);
1558
1559 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1560 "ip_summed=%d",
1561 info.driver, dev ? dev->features : 0L,
1562 skb->sk ? skb->sk->sk_route_caps : 0L,
1563 skb->len, skb->data_len, skb->ip_summed);
1564
Herbert Xua430a432006-07-08 13:34:56 -07001565 if (skb_header_cloned(skb) &&
1566 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1567 return ERR_PTR(err);
1568 }
1569
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001570 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001571 list_for_each_entry_rcu(ptype,
1572 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001573 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001574 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001575 err = ptype->gso_send_check(skb);
1576 segs = ERR_PTR(err);
1577 if (err || skb_gso_ok(skb, features))
1578 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001579 __skb_push(skb, (skb->data -
1580 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001581 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001582 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001583 break;
1584 }
1585 }
1586 rcu_read_unlock();
1587
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001588 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001589
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001590 return segs;
1591}
1592
1593EXPORT_SYMBOL(skb_gso_segment);
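
/*
 * Caller sketch (hypothetical; error handling and freeing of the original
 * skb are omitted): software GSO transmits the returned singly linked
 * list one segment at a time, much as dev_hard_start_xmit() below walks
 * skb->next after dev_gso_segment().
 */
static int sample_xmit_segs(struct sk_buff *skb, int features)
{
        struct sk_buff *segs = skb_gso_segment(skb, features);

        if (IS_ERR(segs))
                return PTR_ERR(segs);
        if (!segs)
                return 0;       /* header verification only, nothing to send */

        while (segs) {
                struct sk_buff *nskb = segs;

                segs = segs->next;
                nskb->next = NULL;
                /* ... hand nskb to the device ... */
        }
        return 0;
}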
1594
Herbert Xufb286bb2005-11-10 13:01:24 -08001595/* Take action when hardware reception checksum errors are detected. */
1596#ifdef CONFIG_BUG
1597void netdev_rx_csum_fault(struct net_device *dev)
1598{
1599 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001600 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001601 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001602 dump_stack();
1603 }
1604}
1605EXPORT_SYMBOL(netdev_rx_csum_fault);
1606#endif
1607
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608/* Actually, we should eliminate this check as soon as we know that:
1609 * 1. An IOMMU is present and can map all of this machine's memory.
1610 * 2. No high memory really exists on this machine.
1611 */
1612
1613static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1614{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001615#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 int i;
1617
1618 if (dev->features & NETIF_F_HIGHDMA)
1619 return 0;
1620
1621 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1622 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1623 return 1;
1624
Herbert Xu3d3a8532006-06-27 13:33:10 -07001625#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 return 0;
1627}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001629struct dev_gso_cb {
1630 void (*destructor)(struct sk_buff *skb);
1631};
1632
1633#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1634
1635static void dev_gso_skb_destructor(struct sk_buff *skb)
1636{
1637 struct dev_gso_cb *cb;
1638
1639 do {
1640 struct sk_buff *nskb = skb->next;
1641
1642 skb->next = nskb->next;
1643 nskb->next = NULL;
1644 kfree_skb(nskb);
1645 } while (skb->next);
1646
1647 cb = DEV_GSO_CB(skb);
1648 if (cb->destructor)
1649 cb->destructor(skb);
1650}
1651
1652/**
1653 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1654 * @skb: buffer to segment
1655 *
1656 * This function segments the given skb and stores the list of segments
1657 * in skb->next.
1658 */
1659static int dev_gso_segment(struct sk_buff *skb)
1660{
1661 struct net_device *dev = skb->dev;
1662 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001663 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1664 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001665
Herbert Xu576a30e2006-06-27 13:22:38 -07001666 segs = skb_gso_segment(skb, features);
1667
1668 /* Verifying header integrity only. */
1669 if (!segs)
1670 return 0;
1671
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001672 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001673 return PTR_ERR(segs);
1674
1675 skb->next = segs;
1676 DEV_GSO_CB(skb)->destructor = skb->destructor;
1677 skb->destructor = dev_gso_skb_destructor;
1678
1679 return 0;
1680}
1681
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001682int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1683 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001684{
Stephen Hemminger00829822008-11-20 20:14:53 -08001685 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001686 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001687
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001688 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001689 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001690 dev_queue_xmit_nit(skb, dev);
1691
Herbert Xu576a30e2006-06-27 13:22:38 -07001692 if (netif_needs_gso(dev, skb)) {
1693 if (unlikely(dev_gso_segment(skb)))
1694 goto out_kfree_skb;
1695 if (skb->next)
1696 goto gso;
1697 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001698
Eric Dumazet93f154b2009-05-18 22:19:19 -07001699 /*
1700 * If the device doesn't need skb->dst, release it right now
1701 * while it's hot in this CPU's cache.
1702 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001703 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1704 skb_dst_drop(skb);
1705
Patrick Ohlyac45f602009-02-12 05:03:37 +00001706 rc = ops->ndo_start_xmit(skb, dev);
Eric Dumazet08baf562009-05-25 22:58:01 -07001707 if (rc == 0)
1708 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001709 /*
1710 * TODO: if skb_orphan() was called by
1711 * dev->hard_start_xmit() (for example, the unmodified
1712 * igb driver does that; bnx2 doesn't), then
1713 * skb_tx_software_timestamp() will be unable to send
1714 * back the time stamp.
1715 *
1716 * How can this be prevented? Always create another
1717 * reference to the socket before calling
1718 * dev->hard_start_xmit()? Prevent that skb_orphan()
1719 * does anything in dev->hard_start_xmit() by clearing
1720 * the skb destructor before the call and restoring it
1721 * afterwards, then doing the skb_orphan() ourselves?
1722 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001723 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001724 }
1725
Herbert Xu576a30e2006-06-27 13:22:38 -07001726gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001727 do {
1728 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001729
1730 skb->next = nskb->next;
1731 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001732 rc = ops->ndo_start_xmit(nskb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001733 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001734 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001735 skb->next = nskb;
1736 return rc;
1737 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001738 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001739 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001740 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001741 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001742
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001743 skb->destructor = DEV_GSO_CB(skb)->destructor;
1744
1745out_kfree_skb:
1746 kfree_skb(skb);
1747 return 0;
1748}
1749
David S. Miller70192982009-01-27 16:34:47 -08001750static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001751
Stephen Hemminger92477442009-03-21 13:39:26 -07001752u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001753{
David S. Miller70192982009-01-27 16:34:47 -08001754 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001755
David S. Miller513de112009-05-03 14:43:10 -07001756 if (skb_rx_queue_recorded(skb)) {
1757 hash = skb_get_rx_queue(skb);
1758 while (unlikely(hash >= dev->real_num_tx_queues))
1759 hash -= dev->real_num_tx_queues;
1760 return hash;
1761 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001762
1763 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001764 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001765 else
David S. Miller70192982009-01-27 16:34:47 -08001766 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001767
David S. Miller70192982009-01-27 16:34:47 -08001768 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001769
David S. Millerb6b2fed2008-07-21 09:48:06 -07001770 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001771}
Stephen Hemminger92477442009-03-21 13:39:26 -07001772EXPORT_SYMBOL(skb_tx_hash);
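
/*
 * Hook sketch (hypothetical driver): a multiqueue driver with no special
 * steering needs can implement ndo_select_queue() as a thin wrapper
 * around skb_tx_hash(), which is what dev_pick_tx() below falls back to
 * anyway when no hook is provided.
 */
static u16 sample_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        return skb_tx_hash(dev, skb);
}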
David S. Miller8f0f2222008-07-15 03:47:03 -07001773
David S. Millere8a04642008-07-17 00:34:19 -07001774static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1775 struct sk_buff *skb)
1776{
Stephen Hemminger00829822008-11-20 20:14:53 -08001777 const struct net_device_ops *ops = dev->netdev_ops;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001778 u16 queue_index = 0;
1779
Stephen Hemminger00829822008-11-20 20:14:53 -08001780 if (ops->ndo_select_queue)
1781 queue_index = ops->ndo_select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001782 else if (dev->real_num_tx_queues > 1)
David S. Miller70192982009-01-27 16:34:47 -08001783 queue_index = skb_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001784
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001785 skb_set_queue_mapping(skb, queue_index);
1786 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001787}
1788
Dave Jonesd29f7492008-07-22 14:09:06 -07001789/**
1790 * dev_queue_xmit - transmit a buffer
1791 * @skb: buffer to transmit
1792 *
1793 * Queue a buffer for transmission to a network device. The caller must
1794 * have set the device and priority and built the buffer before calling
1795 * this function. The function can be called from an interrupt.
1796 *
1797 * A negative errno code is returned on a failure. A success does not
1798 * guarantee the frame will be transmitted as it may be dropped due
1799 * to congestion or traffic shaping.
1800 *
1801 * -----------------------------------------------------------------------------------
1802 * I notice this method can also return errors from the queue disciplines,
1803 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1804 * be positive.
1805 *
1806 * Regardless of the return value, the skb is consumed, so it is currently
1807 * difficult to retry a send to this method. (You can bump the ref count
1808 * before sending to hold a reference for retry if you are careful.)
1809 *
1810 * When calling this method, interrupts MUST be enabled. This is because
1811 * the BH enable code must have IRQs enabled so that it will not deadlock.
1812 * --BLG
1813 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814int dev_queue_xmit(struct sk_buff *skb)
1815{
1816 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001817 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 struct Qdisc *q;
1819 int rc = -ENOMEM;
1820
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001821 /* GSO will handle the following emulations directly. */
1822 if (netif_needs_gso(dev, skb))
1823 goto gso;
1824
David S. Miller4cf704f2009-06-09 00:18:51 -07001825 if (skb_has_frags(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001827 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 goto out_kfree_skb;
1829
1830 /* Fragmented skb is linearized if device does not support SG,
1831 * or if at least one of fragments is in highmem and device
1832 * does not support DMA from it.
1833 */
1834 if (skb_shinfo(skb)->nr_frags &&
1835 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001836 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 goto out_kfree_skb;
1838
1839 /* If packet is not checksummed and device does not support
1840 * checksumming for this protocol, complete checksumming here.
1841 */
Herbert Xu663ead32007-04-09 11:59:07 -07001842 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1843 skb_set_transport_header(skb, skb->csum_start -
1844 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001845 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1846 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001847 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001849gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001850 /* Disable soft irqs for various locks below. Also
1851 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001853 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854
David S. Millereae792b2008-07-15 03:03:33 -07001855 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001856 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001857
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858#ifdef CONFIG_NET_CLS_ACT
1859 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1860#endif
1861 if (q->enqueue) {
David S. Miller5fb66222008-08-02 20:02:43 -07001862 spinlock_t *root_lock = qdisc_lock(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
David S. Miller37437bb2008-07-16 02:15:04 -07001864 spin_lock(root_lock);
1865
David S. Millera9312ae2008-08-17 21:51:03 -07001866 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
David S. Miller96d20312008-08-17 23:37:16 -07001867 kfree_skb(skb);
David S. Millera9312ae2008-08-17 21:51:03 -07001868 rc = NET_XMIT_DROP;
David S. Miller96d20312008-08-17 23:37:16 -07001869 } else {
1870 rc = qdisc_enqueue_root(skb, q);
1871 qdisc_run(q);
David S. Millera9312ae2008-08-17 21:51:03 -07001872 }
David S. Miller37437bb2008-07-16 02:15:04 -07001873 spin_unlock(root_lock);
1874
David S. Miller37437bb2008-07-16 02:15:04 -07001875 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 }
1877
1878 /* The device has no queue. Common case for software devices:
1879 loopback, all sorts of tunnels...
1880
Herbert Xu932ff272006-06-09 12:20:56 -07001881 Really, it is unlikely that netif_tx_lock protection is necessary
1882 here. (E.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 counters.)
1884 However, it is possible that they rely on the protection
1885 we provide here.
1886
1887 Check this and grab the lock; it is not prone to deadlocks.
1888 Alternatively, shoot the noqueue qdisc; it is even simpler 8)
1889 */
1890 if (dev->flags & IFF_UP) {
1891 int cpu = smp_processor_id(); /* ok because BHs are off */
1892
David S. Millerc773e842008-07-08 23:13:53 -07001893 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894
David S. Millerc773e842008-07-08 23:13:53 -07001895 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001897 if (!netif_tx_queue_stopped(txq)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 rc = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001899 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001900 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 goto out;
1902 }
1903 }
David S. Millerc773e842008-07-08 23:13:53 -07001904 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 if (net_ratelimit())
1906 printk(KERN_CRIT "Virtual device %s asks to "
1907 "queue packet!\n", dev->name);
1908 } else {
1909 /* Recursion detected! This is possible,
1910 * unfortunately. */
1911 if (net_ratelimit())
1912 printk(KERN_CRIT "Dead loop on virtual device "
1913 "%s, fix it urgently!\n", dev->name);
1914 }
1915 }
1916
1917 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001918 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919
1920out_kfree_skb:
1921 kfree_skb(skb);
1922 return rc;
1923out:
Herbert Xud4828d82006-06-22 02:28:18 -07001924 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 return rc;
1926}
1927
1928
1929/*=======================================================================
1930 Receiver routines
1931 =======================================================================*/
1932
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001933int netdev_max_backlog __read_mostly = 1000;
1934int netdev_budget __read_mostly = 300;
1935int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
1937DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1938
1939
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940/**
1941 * netif_rx - post buffer to the network code
1942 * @skb: buffer to post
1943 *
1944 * This function receives a packet from a device driver and queues it for
1945 * the upper (protocol) levels to process. It always succeeds. The buffer
1946 * may be dropped during processing for congestion control or by the
1947 * protocol layers.
1948 *
1949 * return values:
1950 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 * NET_RX_DROP (packet was dropped)
1952 *
1953 */
1954
1955int netif_rx(struct sk_buff *skb)
1956{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 struct softnet_data *queue;
1958 unsigned long flags;
1959
1960 /* if netpoll wants it, pretend we never saw it */
1961 if (netpoll_rx(skb))
1962 return NET_RX_DROP;
1963
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001964 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001965 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966
1967 /*
1968 * The code is arranged so that the path is shortest
1969 * when the CPU is congested but still operating.
1970 */
1971 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 queue = &__get_cpu_var(softnet_data);
1973
1974 __get_cpu_var(netdev_rx_stat).total++;
1975 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1976 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001980 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 }
1982
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001983 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 goto enqueue;
1985 }
1986
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 __get_cpu_var(netdev_rx_stat).dropped++;
1988 local_irq_restore(flags);
1989
1990 kfree_skb(skb);
1991 return NET_RX_DROP;
1992}
1993
1994int netif_rx_ni(struct sk_buff *skb)
1995{
1996 int err;
1997
1998 preempt_disable();
1999 err = netif_rx(skb);
2000 if (local_softirq_pending())
2001 do_softirq();
2002 preempt_enable();
2003
2004 return err;
2005}
2006
2007EXPORT_SYMBOL(netif_rx_ni);
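
/*
 * Driver-side sketch (hypothetical): an interrupt handler posts buffers
 * with netif_rx(); code running in process context (e.g. a USB or
 * workqueue based driver) uses netif_rx_ni() so that pending softirqs
 * are run before returning.
 */
static void sample_rx_from_irq(struct sk_buff *skb)
{
        netif_rx(skb);
}

static void sample_rx_from_task(struct sk_buff *skb)
{
        netif_rx_ni(skb);
}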
2008
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009static void net_tx_action(struct softirq_action *h)
2010{
2011 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2012
2013 if (sd->completion_queue) {
2014 struct sk_buff *clist;
2015
2016 local_irq_disable();
2017 clist = sd->completion_queue;
2018 sd->completion_queue = NULL;
2019 local_irq_enable();
2020
2021 while (clist) {
2022 struct sk_buff *skb = clist;
2023 clist = clist->next;
2024
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002025 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 __kfree_skb(skb);
2027 }
2028 }
2029
2030 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002031 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032
2033 local_irq_disable();
2034 head = sd->output_queue;
2035 sd->output_queue = NULL;
2036 local_irq_enable();
2037
2038 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002039 struct Qdisc *q = head;
2040 spinlock_t *root_lock;
2041
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 head = head->next_sched;
2043
David S. Miller5fb66222008-08-02 20:02:43 -07002044 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002045 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002046 smp_mb__before_clear_bit();
2047 clear_bit(__QDISC_STATE_SCHED,
2048 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002049 qdisc_run(q);
2050 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002052 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002053 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002054 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002055 } else {
2056 smp_mb__before_clear_bit();
2057 clear_bit(__QDISC_STATE_SCHED,
2058 &q->state);
2059 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 }
2061 }
2062 }
2063}
2064
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002065static inline int deliver_skb(struct sk_buff *skb,
2066 struct packet_type *pt_prev,
2067 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068{
2069 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002070 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071}
2072
2073#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002074
2075#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2076/* This hook is defined here for ATM LANE */
2077int (*br_fdb_test_addr_hook)(struct net_device *dev,
2078 unsigned char *addr) __read_mostly;
2079EXPORT_SYMBOL(br_fdb_test_addr_hook);
2080#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081
Stephen Hemminger6229e362007-03-21 13:38:47 -07002082/*
2083 * If bridge module is loaded call bridging hook.
2084 * returns NULL if packet was consumed.
2085 */
2086struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2087 struct sk_buff *skb) __read_mostly;
Michał Mirosławda678292009-06-05 05:35:28 +00002088EXPORT_SYMBOL(br_handle_frame_hook);
2089
Stephen Hemminger6229e362007-03-21 13:38:47 -07002090static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2091 struct packet_type **pt_prev, int *ret,
2092 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093{
2094 struct net_bridge_port *port;
2095
Stephen Hemminger6229e362007-03-21 13:38:47 -07002096 if (skb->pkt_type == PACKET_LOOPBACK ||
2097 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2098 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099
2100 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002101 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002103 }
2104
Stephen Hemminger6229e362007-03-21 13:38:47 -07002105 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106}
2107#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002108#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109#endif
2110
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002111#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2112struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2113EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2114
2115static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2116 struct packet_type **pt_prev,
2117 int *ret,
2118 struct net_device *orig_dev)
2119{
2120 if (skb->dev->macvlan_port == NULL)
2121 return skb;
2122
2123 if (*pt_prev) {
2124 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2125 *pt_prev = NULL;
2126 }
2127 return macvlan_handle_frame_hook(skb);
2128}
2129#else
2130#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2131#endif
2132
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133#ifdef CONFIG_NET_CLS_ACT
2134/* TODO: Maybe we should just force sch_ingress to be compiled in
2135 * whenever CONFIG_NET_CLS_ACT is? Otherwise we execute some useless
2136 * instructions (a compare and two extra stores) when it is not
2137 * configured but CONFIG_NET_CLS_ACT is.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002138 * NOTE: This doesn't remove any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 * the ingress scheduler, you just can't add policies on ingress.
2140 *
2141 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002142static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002145 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002146 struct netdev_queue *rxq;
2147 int result = TC_ACT_OK;
2148 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002149
Herbert Xuf697c3e2007-10-14 00:38:47 -07002150 if (MAX_RED_LOOP < ttl++) {
2151 printk(KERN_WARNING
2152 "Redir loop detected Dropping packet (%d->%d)\n",
2153 skb->iif, dev->ifindex);
2154 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 }
2156
Herbert Xuf697c3e2007-10-14 00:38:47 -07002157 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2158 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2159
David S. Miller555353c2008-07-08 17:33:13 -07002160 rxq = &dev->rx_queue;
2161
David S. Miller83874002008-07-17 00:53:03 -07002162 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002163 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002164 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002165 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2166 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002167 spin_unlock(qdisc_lock(q));
2168 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002169
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 return result;
2171}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002172
2173static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2174 struct packet_type **pt_prev,
2175 int *ret, struct net_device *orig_dev)
2176{
David S. Miller8d50b532008-07-30 02:37:46 -07002177 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002178 goto out;
2179
2180 if (*pt_prev) {
2181 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2182 *pt_prev = NULL;
2183 } else {
2184 /* Huh? Why does turning on AF_PACKET affect this? */
2185 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2186 }
2187
2188 switch (ing_filter(skb)) {
2189 case TC_ACT_SHOT:
2190 case TC_ACT_STOLEN:
2191 kfree_skb(skb);
2192 return NULL;
2193 }
2194
2195out:
2196 skb->tc_verd = 0;
2197 return skb;
2198}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199#endif
2200
Patrick McHardybc1d0412008-07-14 22:49:30 -07002201/*
2202 * netif_nit_deliver - deliver received packets to network taps
2203 * @skb: buffer
2204 *
2205 * This function is used to deliver incoming packets to network
2206 * taps. It should be used when the normal netif_receive_skb path
2207 * is bypassed, for example because of VLAN acceleration.
2208 */
2209void netif_nit_deliver(struct sk_buff *skb)
2210{
2211 struct packet_type *ptype;
2212
2213 if (list_empty(&ptype_all))
2214 return;
2215
2216 skb_reset_network_header(skb);
2217 skb_reset_transport_header(skb);
2218 skb->mac_len = skb->network_header - skb->mac_header;
2219
2220 rcu_read_lock();
2221 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2222 if (!ptype->dev || ptype->dev == skb->dev)
2223 deliver_skb(skb, ptype, skb->dev);
2224 }
2225 rcu_read_unlock();
2226}
2227
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002228/**
2229 * netif_receive_skb - process receive buffer from network
2230 * @skb: buffer to process
2231 *
2232 * netif_receive_skb() is the main receive data processing function.
2233 * It always succeeds. The buffer may be dropped during processing
2234 * for congestion control or by the protocol layers.
2235 *
2236 * This function may only be called from softirq context and interrupts
2237 * should be enabled.
2238 *
2239 * Return values (usually ignored):
2240 * NET_RX_SUCCESS: no congestion
2241 * NET_RX_DROP: packet was dropped
2242 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243int netif_receive_skb(struct sk_buff *skb)
2244{
2245 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002246 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002247 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002249 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002251 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2252 return NET_RX_SUCCESS;
2253
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002255 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 return NET_RX_DROP;
2257
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002258 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002259 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
Patrick McHardyc01003c2007-03-29 11:46:52 -07002261 if (!skb->iif)
2262 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002263
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002264 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002265 orig_dev = skb->dev;
2266 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002267 if (skb_bond_should_drop(skb))
2268 null_or_orig = orig_dev; /* deliver only exact match */
2269 else
2270 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002271 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002272
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 __get_cpu_var(netdev_rx_stat).total++;
2274
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002275 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002276 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002277 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
2279 pt_prev = NULL;
2280
2281 rcu_read_lock();
2282
2283#ifdef CONFIG_NET_CLS_ACT
2284 if (skb->tc_verd & TC_NCLS) {
2285 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2286 goto ncls;
2287 }
2288#endif
2289
2290 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002291 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2292 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002293 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002294 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 pt_prev = ptype;
2296 }
2297 }
2298
2299#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002300 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2301 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303ncls:
2304#endif
2305
Stephen Hemminger6229e362007-03-21 13:38:47 -07002306 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2307 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002309 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2310 if (!skb)
2311 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312
Herbert Xu9a279bc2009-02-04 16:55:27 -08002313 skb_orphan(skb);
2314
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002316 list_for_each_entry_rcu(ptype,
2317 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002319 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2320 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002321 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002322 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323 pt_prev = ptype;
2324 }
2325 }
2326
2327 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002328 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 } else {
2330 kfree_skb(skb);
2331 /* Jamal, now you will not be able to escape explaining
2332 * to me how you were going to use this. :-)
2333 */
2334 ret = NET_RX_DROP;
2335 }
2336
2337out:
2338 rcu_read_unlock();
2339 return ret;
2340}
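
/*
 * NAPI poll sketch (hypothetical driver): sample_rx_fetch() is an assumed
 * helper that returns the next completed rx buffer, or NULL when the ring
 * is empty.  Each harvested skb is pushed through netif_receive_skb()
 * from softirq context, as the comment above requires.
 */
static struct sk_buff *sample_rx_fetch(struct napi_struct *napi);

static int sample_poll(struct napi_struct *napi, int budget)
{
        int work = 0;
        struct sk_buff *skb;

        while (work < budget && (skb = sample_rx_fetch(napi)) != NULL) {
                skb->protocol = eth_type_trans(skb, napi->dev);
                netif_receive_skb(skb);
                work++;
        }

        if (work < budget)
                napi_complete(napi);
        return work;
}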
2341
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002342/* Network device is going away, flush any packets still pending */
2343static void flush_backlog(void *arg)
2344{
2345 struct net_device *dev = arg;
2346 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2347 struct sk_buff *skb, *tmp;
2348
2349 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2350 if (skb->dev == dev) {
2351 __skb_unlink(skb, &queue->input_pkt_queue);
2352 kfree_skb(skb);
2353 }
2354}
2355
Herbert Xud565b0a2008-12-15 23:38:52 -08002356static int napi_gro_complete(struct sk_buff *skb)
2357{
2358 struct packet_type *ptype;
2359 __be16 type = skb->protocol;
2360 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2361 int err = -ENOENT;
2362
Herbert Xufc59f9a2009-04-14 15:11:06 -07002363 if (NAPI_GRO_CB(skb)->count == 1) {
2364 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002365 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002366 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002367
2368 rcu_read_lock();
2369 list_for_each_entry_rcu(ptype, head, list) {
2370 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2371 continue;
2372
2373 err = ptype->gro_complete(skb);
2374 break;
2375 }
2376 rcu_read_unlock();
2377
2378 if (err) {
2379 WARN_ON(&ptype->list == head);
2380 kfree_skb(skb);
2381 return NET_RX_SUCCESS;
2382 }
2383
2384out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002385 return netif_receive_skb(skb);
2386}
2387
2388void napi_gro_flush(struct napi_struct *napi)
2389{
2390 struct sk_buff *skb, *next;
2391
2392 for (skb = napi->gro_list; skb; skb = next) {
2393 next = skb->next;
2394 skb->next = NULL;
2395 napi_gro_complete(skb);
2396 }
2397
Herbert Xu4ae55442009-02-08 18:00:36 +00002398 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002399 napi->gro_list = NULL;
2400}
2401EXPORT_SYMBOL(napi_gro_flush);
2402
Herbert Xu96e93ea2009-01-06 10:49:34 -08002403int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002404{
2405 struct sk_buff **pp = NULL;
2406 struct packet_type *ptype;
2407 __be16 type = skb->protocol;
2408 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002409 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002410 int mac_len;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002411 int ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002412
2413 if (!(skb->dev->features & NETIF_F_GRO))
2414 goto normal;
2415
David S. Miller4cf704f2009-06-09 00:18:51 -07002416 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002417 goto normal;
2418
Herbert Xud565b0a2008-12-15 23:38:52 -08002419 rcu_read_lock();
2420 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002421 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2422 continue;
2423
Herbert Xu86911732009-01-29 14:19:50 +00002424 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002425 mac_len = skb->network_header - skb->mac_header;
2426 skb->mac_len = mac_len;
2427 NAPI_GRO_CB(skb)->same_flow = 0;
2428 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002429 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002430
Herbert Xud565b0a2008-12-15 23:38:52 -08002431 pp = ptype->gro_receive(&napi->gro_list, skb);
2432 break;
2433 }
2434 rcu_read_unlock();
2435
2436 if (&ptype->list == head)
2437 goto normal;
2438
Herbert Xu0da2afd52008-12-26 14:57:42 -08002439 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002440 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002441
Herbert Xud565b0a2008-12-15 23:38:52 -08002442 if (pp) {
2443 struct sk_buff *nskb = *pp;
2444
2445 *pp = nskb->next;
2446 nskb->next = NULL;
2447 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002448 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002449 }
2450
Herbert Xu0da2afd52008-12-26 14:57:42 -08002451 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002452 goto ok;
2453
Herbert Xu4ae55442009-02-08 18:00:36 +00002454 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002455 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002456
Herbert Xu4ae55442009-02-08 18:00:36 +00002457 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002458 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002459 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002460 skb->next = napi->gro_list;
2461 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002462 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002463
Herbert Xuad0f9902009-02-01 01:24:55 -08002464pull:
Herbert Xucb189782009-05-26 18:50:31 +00002465 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2466 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2467
2468 BUG_ON(skb->end - skb->tail < grow);
2469
2470 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2471
2472 skb->tail += grow;
2473 skb->data_len -= grow;
2474
2475 skb_shinfo(skb)->frags[0].page_offset += grow;
2476 skb_shinfo(skb)->frags[0].size -= grow;
2477
2478 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2479 put_page(skb_shinfo(skb)->frags[0].page);
2480 /* shift the remaining frags down; the size is in bytes */
2481 memmove(skb_shinfo(skb)->frags,
2482 skb_shinfo(skb)->frags + 1,
2483 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2483 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002484 }
2485
Herbert Xud565b0a2008-12-15 23:38:52 -08002486ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002487 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002488
2489normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002490 ret = GRO_NORMAL;
2491 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002492}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002493EXPORT_SYMBOL(dev_gro_receive);
2494
2495static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2496{
2497 struct sk_buff *p;
2498
Herbert Xud1c76af2009-03-16 10:50:02 -07002499 if (netpoll_rx_on(skb))
2500 return GRO_NORMAL;
2501
Herbert Xu96e93ea2009-01-06 10:49:34 -08002502 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002503 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2504 && !compare_ether_header(skb_mac_header(p),
2505 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002506 NAPI_GRO_CB(p)->flush = 0;
2507 }
2508
2509 return dev_gro_receive(napi, skb);
2510}
Herbert Xu5d38a072009-01-04 16:13:40 -08002511
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002512int napi_skb_finish(int ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002513{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002514 int err = NET_RX_SUCCESS;
2515
2516 switch (ret) {
2517 case GRO_NORMAL:
Herbert Xu5d38a072009-01-04 16:13:40 -08002518 return netif_receive_skb(skb);
2519
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002520 case GRO_DROP:
2521 err = NET_RX_DROP;
2522 /* fall through */
2523
2524 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002525 kfree_skb(skb);
2526 break;
2527 }
2528
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002529 return err;
2530}
2531EXPORT_SYMBOL(napi_skb_finish);
2532
Herbert Xu78a478d2009-05-26 18:50:21 +00002533void skb_gro_reset_offset(struct sk_buff *skb)
2534{
2535 NAPI_GRO_CB(skb)->data_offset = 0;
2536 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002537 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002538
Herbert Xu78d3fd02009-05-26 18:50:23 +00002539 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002540 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002541 NAPI_GRO_CB(skb)->frag0 =
2542 page_address(skb_shinfo(skb)->frags[0].page) +
2543 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002544 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2545 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002546}
2547EXPORT_SYMBOL(skb_gro_reset_offset);
2548
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002549int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2550{
Herbert Xu86911732009-01-29 14:19:50 +00002551 skb_gro_reset_offset(skb);
2552
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002553 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002554}
2555EXPORT_SYMBOL(napi_gro_receive);
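
/*
 * Sketch (hypothetical): a GRO-aware driver substitutes napi_gro_receive()
 * for netif_receive_skb() in its poll loop; eligible flows are then merged
 * before delivery to the protocol layers.
 */
static void sample_gro_rx(struct napi_struct *napi, struct sk_buff *skb)
{
        skb->protocol = eth_type_trans(skb, napi->dev);
        napi_gro_receive(napi, skb);
}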
2556
Herbert Xu96e93ea2009-01-06 10:49:34 -08002557void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2558{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002559 __skb_pull(skb, skb_headlen(skb));
2560 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2561
2562 napi->skb = skb;
2563}
2564EXPORT_SYMBOL(napi_reuse_skb);
2565
Herbert Xu76620aa2009-04-16 02:02:07 -07002566struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002567{
2568 struct net_device *dev = napi->dev;
2569 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002570
2571 if (!skb) {
2572 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2573 if (!skb)
2574 goto out;
2575
2576 skb_reserve(skb, NET_IP_ALIGN);
Herbert Xu76620aa2009-04-16 02:02:07 -07002577
2578 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002579 }
2580
Herbert Xu96e93ea2009-01-06 10:49:34 -08002581out:
2582 return skb;
2583}
Herbert Xu76620aa2009-04-16 02:02:07 -07002584EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002585
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002586int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2587{
2588 int err = NET_RX_SUCCESS;
2589
2590 switch (ret) {
2591 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002592 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002593 skb->protocol = eth_type_trans(skb, napi->dev);
2594
2595 if (ret == GRO_NORMAL)
2596 return netif_receive_skb(skb);
2597
2598 skb_gro_pull(skb, -ETH_HLEN);
2599 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002600
2601 case GRO_DROP:
2602 err = NET_RX_DROP;
2603 /* fall through */
2604
2605 case GRO_MERGED_FREE:
2606 napi_reuse_skb(napi, skb);
2607 break;
2608 }
2609
2610 return err;
2611}
2612EXPORT_SYMBOL(napi_frags_finish);
2613
Herbert Xu76620aa2009-04-16 02:02:07 -07002614struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002615{
Herbert Xu76620aa2009-04-16 02:02:07 -07002616 struct sk_buff *skb = napi->skb;
2617 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002618 unsigned int hlen;
2619 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002620
2621 napi->skb = NULL;
2622
2623 skb_reset_mac_header(skb);
2624 skb_gro_reset_offset(skb);
2625
Herbert Xua5b1cf22009-05-26 18:50:28 +00002626 off = skb_gro_offset(skb);
2627 hlen = off + sizeof(*eth);
2628 eth = skb_gro_header_fast(skb, off);
2629 if (skb_gro_header_hard(skb, hlen)) {
2630 eth = skb_gro_header_slow(skb, hlen, off);
2631 if (unlikely(!eth)) {
2632 napi_reuse_skb(napi, skb);
2633 skb = NULL;
2634 goto out;
2635 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002636 }
2637
2638 skb_gro_pull(skb, sizeof(*eth));
2639
2640 /*
2641 * This works because the only protocols we care about don't require
2642 * special handling. We'll fix it up properly at the end.
2643 */
2644 skb->protocol = eth->h_proto;
2645
2646out:
2647 return skb;
2648}
2649EXPORT_SYMBOL(napi_frags_skb);
2650
2651int napi_gro_frags(struct napi_struct *napi)
2652{
2653 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002654
2655 if (!skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002656 return NET_RX_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002657
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002658 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002659}
2660EXPORT_SYMBOL(napi_gro_frags);
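
/*
 * Frag-based entry sketch (hypothetical driver): the driver borrows
 * napi->skb via napi_get_frags(), attaches one page fragment it has
 * received, and hands the skb back through napi_gro_frags().  The page,
 * offset and length are assumed to come from the driver's rx ring.
 */
static int sample_rx_page(struct napi_struct *napi, struct page *page,
                          unsigned int off, unsigned int len)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (!skb)
                return NET_RX_DROP;

        skb_fill_page_desc(skb, 0, page, off, len);
        skb->len += len;
        skb->data_len += len;
        skb->truesize += len;

        return napi_gro_frags(napi);
}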
2661
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002662static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663{
2664 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2666 unsigned long start_time = jiffies;
2667
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002668 napi->weight = weight_p;
2669 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671
2672 local_irq_disable();
2673 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002674 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002675 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002676 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002677 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002678 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 local_irq_enable();
2680
Herbert Xu8f1ead22009-03-26 00:59:10 -07002681 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002682 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002684 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685}
2686
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002687/**
2688 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002689 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002690 *
2691 * The entry's receive function will be scheduled to run
2692 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002693void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002694{
2695 unsigned long flags;
2696
2697 local_irq_save(flags);
2698 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2699 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2700 local_irq_restore(flags);
2701}
2702EXPORT_SYMBOL(__napi_schedule);
2703
Herbert Xud565b0a2008-12-15 23:38:52 -08002704void __napi_complete(struct napi_struct *n)
2705{
2706 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2707 BUG_ON(n->gro_list);
2708
2709 list_del(&n->poll_list);
2710 smp_mb__before_clear_bit();
2711 clear_bit(NAPI_STATE_SCHED, &n->state);
2712}
2713EXPORT_SYMBOL(__napi_complete);
2714
2715void napi_complete(struct napi_struct *n)
2716{
2717 unsigned long flags;
2718
2719 /*
2720 * don't let NAPI dequeue from the CPU poll list
2721 * just in case it's running on a different CPU
2722 */
2723 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2724 return;
2725
2726 napi_gro_flush(n);
2727 local_irq_save(flags);
2728 __napi_complete(n);
2729 local_irq_restore(flags);
2730}
2731EXPORT_SYMBOL(napi_complete);
2732
2733void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2734 int (*poll)(struct napi_struct *, int), int weight)
2735{
2736 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002737 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002738 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002739 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002740 napi->poll = poll;
2741 napi->weight = weight;
2742 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002743 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002744#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002745 spin_lock_init(&napi->poll_lock);
2746 napi->poll_owner = -1;
2747#endif
2748 set_bit(NAPI_STATE_SCHED, &napi->state);
2749}
2750EXPORT_SYMBOL(netif_napi_add);
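
/*
 * Usage sketch (hypothetical "foo" driver): register the poll method
 * once at probe time, then drain up to @budget packets per call and
 * complete when the ring runs dry so interrupts can be re-enabled.
 *
 *	netif_napi_add(netdev, &priv->napi, foo_poll, 64);
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		int work = foo_clean_rx_ring(priv, budget);
 *
 *		if (work < budget) {
 *			napi_complete(napi);
 *			foo_unmask_rx_irq(priv);
 *		}
 *		return work;
 *	}
 */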
2751
2752void netif_napi_del(struct napi_struct *napi)
2753{
2754 struct sk_buff *skb, *next;
2755
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002756 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002757 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002758
2759 for (skb = napi->gro_list; skb; skb = next) {
2760 next = skb->next;
2761 skb->next = NULL;
2762 kfree_skb(skb);
2763 }
2764
2765 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002766 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002767}
2768EXPORT_SYMBOL(netif_napi_del);
2769
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002770
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771static void net_rx_action(struct softirq_action *h)
2772{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002773 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002774 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002775 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002776 void *have;
2777
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 local_irq_disable();
2779
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002780 while (!list_empty(list)) {
2781 struct napi_struct *n;
2782 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002784		/* If softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002785		 * Allow this to run for 2 jiffies, which allows
2786 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002787 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002788 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 goto softnet_break;
2790
2791 local_irq_enable();
2792
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002793 /* Even though interrupts have been re-enabled, this
2794 * access is safe because interrupts can only add new
2795 * entries to the tail of this list, and only ->poll()
2796 * calls can remove this head entry from the list.
2797 */
2798 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002800 have = netpoll_poll_lock(n);
2801
2802 weight = n->weight;
2803
David S. Miller0a7606c2007-10-29 21:28:47 -07002804 /* This NAPI_STATE_SCHED test is for avoiding a race
2805 * with netpoll's poll_napi(). Only the entity which
2806 * obtains the lock and sees NAPI_STATE_SCHED set will
2807 * actually make the ->poll() call. Therefore we avoid
 2808		 * accidentally calling ->poll() when NAPI is not scheduled.
2809 */
2810 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002811 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002812 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002813 trace_napi_poll(n);
2814 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002815
2816 WARN_ON_ONCE(work > weight);
2817
2818 budget -= work;
2819
2820 local_irq_disable();
2821
2822 /* Drivers must not modify the NAPI state if they
2823 * consume the entire weight. In such cases this code
2824 * still "owns" the NAPI instance and therefore can
2825 * move the instance around on the list at-will.
2826 */
David S. Millerfed17f32008-01-07 21:00:40 -08002827 if (unlikely(work == weight)) {
2828 if (unlikely(napi_disable_pending(n)))
2829 __napi_complete(n);
2830 else
2831 list_move_tail(&n->poll_list, list);
2832 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002833
2834 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835 }
2836out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002837 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002838
Chris Leechdb217332006-06-17 21:24:58 -07002839#ifdef CONFIG_NET_DMA
2840 /*
2841 * There may not be any more sk_buffs coming right now, so push
2842 * any pending DMA copies to hardware
2843 */
Dan Williams2ba05622009-01-06 11:38:14 -07002844 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002845#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002846
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 return;
2848
2849softnet_break:
2850 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2851 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2852 goto out;
2853}
2854
 2855static gifconf_func_t *gifconf_list[NPROTO];
2856
2857/**
 2858 *	register_gifconf	-	register a SIOCGIFCONF handler
2859 * @family: Address family
2860 * @gifconf: Function handler
2861 *
2862 * Register protocol dependent address dumping routines. The handler
2863 * that is passed must not be freed or reused until it has been replaced
2864 * by another handler.
2865 */
 2866int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
2867{
2868 if (family >= NPROTO)
2869 return -EINVAL;
2870 gifconf_list[family] = gifconf;
2871 return 0;
2872}
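
/*
 * For illustration: IPv4 registers its handler from devinet with
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * A gifconf handler is called once per device; with a NULL buffer it
 * returns the space it would need, otherwise it writes one struct
 * ifreq per address and returns the number of bytes consumed.
 */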
2873
2874
2875/*
2876 * Map an interface index to its name (SIOCGIFNAME)
2877 */
2878
2879/*
2880 * We need this ioctl for efficient implementation of the
2881 * if_indextoname() function required by the IPv6 API. Without
2882 * it, we would have to search all the interfaces to find a
2883 * match. --pb
2884 */
2885
Eric W. Biederman881d9662007-09-17 11:56:21 -07002886static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887{
2888 struct net_device *dev;
2889 struct ifreq ifr;
2890
2891 /*
2892 * Fetch the caller's info block.
2893 */
2894
2895 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2896 return -EFAULT;
2897
2898 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002899 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 if (!dev) {
2901 read_unlock(&dev_base_lock);
2902 return -ENODEV;
2903 }
2904
2905 strcpy(ifr.ifr_name, dev->name);
2906 read_unlock(&dev_base_lock);
2907
2908 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2909 return -EFAULT;
2910 return 0;
2911}
2912
2913/*
2914 * Perform a SIOCGIFCONF call. This structure will change
2915 * size eventually, and there is nothing I can do about it.
2916 * Thus we will need a 'compatibility mode'.
2917 */
2918
Eric W. Biederman881d9662007-09-17 11:56:21 -07002919static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920{
2921 struct ifconf ifc;
2922 struct net_device *dev;
2923 char __user *pos;
2924 int len;
2925 int total;
2926 int i;
2927
2928 /*
2929 * Fetch the caller's info block.
2930 */
2931
2932 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2933 return -EFAULT;
2934
2935 pos = ifc.ifc_buf;
2936 len = ifc.ifc_len;
2937
2938 /*
2939 * Loop over the interfaces, and write an info block for each.
2940 */
2941
2942 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002943 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 for (i = 0; i < NPROTO; i++) {
2945 if (gifconf_list[i]) {
2946 int done;
2947 if (!pos)
2948 done = gifconf_list[i](dev, NULL, 0);
2949 else
2950 done = gifconf_list[i](dev, pos + total,
2951 len - total);
2952 if (done < 0)
2953 return -EFAULT;
2954 total += done;
2955 }
2956 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002957 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958
2959 /*
2960 * All done. Write the updated control block back to the caller.
2961 */
2962 ifc.ifc_len = total;
2963
2964 /*
2965 * Both BSD and Solaris return 0 here, so we do too.
2966 */
2967 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2968}
2969
2970#ifdef CONFIG_PROC_FS
2971/*
2972 * This is invoked by the /proc filesystem handler to display a device
2973 * in detail.
2974 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002976 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977{
Denis V. Luneve372c412007-11-19 22:31:54 -08002978 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002979 loff_t off;
2980 struct net_device *dev;
2981
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002983 if (!*pos)
2984 return SEQ_START_TOKEN;
2985
2986 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002987 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002988 if (off++ == *pos)
2989 return dev;
2990
2991 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992}
2993
2994void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2995{
Denis V. Luneve372c412007-11-19 22:31:54 -08002996 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002998 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002999 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000}
3001
3002void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08003003 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004{
3005 read_unlock(&dev_base_lock);
3006}
3007
3008static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3009{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003010 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
Rusty Russell5a1b5892007-04-28 21:04:03 -07003012 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3013 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3014 dev->name, stats->rx_bytes, stats->rx_packets,
3015 stats->rx_errors,
3016 stats->rx_dropped + stats->rx_missed_errors,
3017 stats->rx_fifo_errors,
3018 stats->rx_length_errors + stats->rx_over_errors +
3019 stats->rx_crc_errors + stats->rx_frame_errors,
3020 stats->rx_compressed, stats->multicast,
3021 stats->tx_bytes, stats->tx_packets,
3022 stats->tx_errors, stats->tx_dropped,
3023 stats->tx_fifo_errors, stats->collisions,
3024 stats->tx_carrier_errors +
3025 stats->tx_aborted_errors +
3026 stats->tx_window_errors +
3027 stats->tx_heartbeat_errors,
3028 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029}
3030
3031/*
3032 * Called from the PROCfs module. This now uses the new arbitrary sized
3033 * /proc/net interface to create /proc/net/dev
3034 */
3035static int dev_seq_show(struct seq_file *seq, void *v)
3036{
3037 if (v == SEQ_START_TOKEN)
3038 seq_puts(seq, "Inter-| Receive "
3039 " | Transmit\n"
3040 " face |bytes packets errs drop fifo frame "
3041 "compressed multicast|bytes packets errs "
3042 "drop fifo colls carrier compressed\n");
3043 else
3044 dev_seq_printf_stats(seq, v);
3045 return 0;
3046}
3047
3048static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3049{
3050 struct netif_rx_stats *rc = NULL;
3051
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003052 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003053 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054 rc = &per_cpu(netdev_rx_stat, *pos);
3055 break;
3056 } else
3057 ++*pos;
3058 return rc;
3059}
3060
3061static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3062{
3063 return softnet_get_online(pos);
3064}
3065
3066static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3067{
3068 ++*pos;
3069 return softnet_get_online(pos);
3070}
3071
3072static void softnet_seq_stop(struct seq_file *seq, void *v)
3073{
3074}
3075
3076static int softnet_seq_show(struct seq_file *seq, void *v)
3077{
3078 struct netif_rx_stats *s = v;
3079
3080 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003081 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003082 0, 0, 0, 0, /* was fastroute */
3083 s->cpu_collision );
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084 return 0;
3085}
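
/*
 * Each line of /proc/net/softnet_stat describes one online CPU, e.g.
 * (values illustrative):
 *
 *	0003e803 00000000 00000012 00000000 00000000 00000000 00000000 00000000 00000000
 *
 * i.e. total, dropped, time_squeeze, then five zero placeholders
 * (one reserved, four left over from the removed fastroute code)
 * and finally cpu_collision.
 */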
3086
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003087static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 .start = dev_seq_start,
3089 .next = dev_seq_next,
3090 .stop = dev_seq_stop,
3091 .show = dev_seq_show,
3092};
3093
3094static int dev_seq_open(struct inode *inode, struct file *file)
3095{
Denis V. Luneve372c412007-11-19 22:31:54 -08003096 return seq_open_net(inode, file, &dev_seq_ops,
3097 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098}
3099
Arjan van de Ven9a321442007-02-12 00:55:35 -08003100static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101 .owner = THIS_MODULE,
3102 .open = dev_seq_open,
3103 .read = seq_read,
3104 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003105 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106};
3107
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003108static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109 .start = softnet_seq_start,
3110 .next = softnet_seq_next,
3111 .stop = softnet_seq_stop,
3112 .show = softnet_seq_show,
3113};
3114
3115static int softnet_seq_open(struct inode *inode, struct file *file)
3116{
3117 return seq_open(file, &softnet_seq_ops);
3118}
3119
Arjan van de Ven9a321442007-02-12 00:55:35 -08003120static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121 .owner = THIS_MODULE,
3122 .open = softnet_seq_open,
3123 .read = seq_read,
3124 .llseek = seq_lseek,
3125 .release = seq_release,
3126};
3127
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003128static void *ptype_get_idx(loff_t pos)
3129{
3130 struct packet_type *pt = NULL;
3131 loff_t i = 0;
3132 int t;
3133
3134 list_for_each_entry_rcu(pt, &ptype_all, list) {
3135 if (i == pos)
3136 return pt;
3137 ++i;
3138 }
3139
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003140 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003141 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3142 if (i == pos)
3143 return pt;
3144 ++i;
3145 }
3146 }
3147 return NULL;
3148}
3149
3150static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003151 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003152{
3153 rcu_read_lock();
3154 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3155}
3156
3157static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3158{
3159 struct packet_type *pt;
3160 struct list_head *nxt;
3161 int hash;
3162
3163 ++*pos;
3164 if (v == SEQ_START_TOKEN)
3165 return ptype_get_idx(0);
3166
3167 pt = v;
3168 nxt = pt->list.next;
3169 if (pt->type == htons(ETH_P_ALL)) {
3170 if (nxt != &ptype_all)
3171 goto found;
3172 hash = 0;
3173 nxt = ptype_base[0].next;
3174 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003175 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003176
3177 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003178 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003179 return NULL;
3180 nxt = ptype_base[hash].next;
3181 }
3182found:
3183 return list_entry(nxt, struct packet_type, list);
3184}
3185
3186static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003187 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003188{
3189 rcu_read_unlock();
3190}
3191
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003192static int ptype_seq_show(struct seq_file *seq, void *v)
3193{
3194 struct packet_type *pt = v;
3195
3196 if (v == SEQ_START_TOKEN)
3197 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003198 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003199 if (pt->type == htons(ETH_P_ALL))
3200 seq_puts(seq, "ALL ");
3201 else
3202 seq_printf(seq, "%04x", ntohs(pt->type));
3203
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003204 seq_printf(seq, " %-8s %pF\n",
3205 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003206 }
3207
3208 return 0;
3209}
3210
3211static const struct seq_operations ptype_seq_ops = {
3212 .start = ptype_seq_start,
3213 .next = ptype_seq_next,
3214 .stop = ptype_seq_stop,
3215 .show = ptype_seq_show,
3216};
3217
3218static int ptype_seq_open(struct inode *inode, struct file *file)
3219{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003220 return seq_open_net(inode, file, &ptype_seq_ops,
3221 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003222}
3223
3224static const struct file_operations ptype_seq_fops = {
3225 .owner = THIS_MODULE,
3226 .open = ptype_seq_open,
3227 .read = seq_read,
3228 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003229 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003230};
3231
3232
Pavel Emelyanov46650792007-10-08 20:38:39 -07003233static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234{
3235 int rc = -ENOMEM;
3236
Eric W. Biederman881d9662007-09-17 11:56:21 -07003237 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003238 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003239 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003241 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003242 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003243
Eric W. Biederman881d9662007-09-17 11:56:21 -07003244 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003245 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246 rc = 0;
3247out:
3248 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003249out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003250 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003252 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003254 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255 goto out;
3256}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003257
Pavel Emelyanov46650792007-10-08 20:38:39 -07003258static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003259{
3260 wext_proc_exit(net);
3261
3262 proc_net_remove(net, "ptype");
3263 proc_net_remove(net, "softnet_stat");
3264 proc_net_remove(net, "dev");
3265}
3266
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003267static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003268 .init = dev_proc_net_init,
3269 .exit = dev_proc_net_exit,
3270};
3271
3272static int __init dev_proc_init(void)
3273{
3274 return register_pernet_subsys(&dev_proc_ops);
3275}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276#else
3277#define dev_proc_init() 0
3278#endif /* CONFIG_PROC_FS */
3279
3280
3281/**
3282 * netdev_set_master - set up master/slave pair
3283 * @slave: slave device
3284 * @master: new master device
3285 *
3286 * Changes the master device of the slave. Pass %NULL to break the
3287 * bonding. The caller must hold the RTNL semaphore. On a failure
3288 * a negative errno code is returned. On success the reference counts
3289 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3290 * function returns zero.
3291 */
3292int netdev_set_master(struct net_device *slave, struct net_device *master)
3293{
3294 struct net_device *old = slave->master;
3295
3296 ASSERT_RTNL();
3297
3298 if (master) {
3299 if (old)
3300 return -EBUSY;
3301 dev_hold(master);
3302 }
3303
3304 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003305
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306 synchronize_net();
3307
3308 if (old)
3309 dev_put(old);
3310
3311 if (master)
3312 slave->flags |= IFF_SLAVE;
3313 else
3314 slave->flags &= ~IFF_SLAVE;
3315
3316 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3317 return 0;
3318}
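
/*
 * Usage sketch: bonding-style enslavement under RTNL (error handling
 * trimmed; the device variables are illustrative).
 *
 *	rtnl_lock();
 *	err = netdev_set_master(slave_dev, bond_dev);
 *	...
 *	netdev_set_master(slave_dev, NULL);
 *	rtnl_unlock();
 */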
3319
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003320static void dev_change_rx_flags(struct net_device *dev, int flags)
3321{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003322 const struct net_device_ops *ops = dev->netdev_ops;
3323
3324 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3325 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003326}
3327
Wang Chendad9b332008-06-18 01:48:28 -07003328static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003329{
3330 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003331 uid_t uid;
3332 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003333
Patrick McHardy24023452007-07-14 18:51:31 -07003334 ASSERT_RTNL();
3335
Wang Chendad9b332008-06-18 01:48:28 -07003336 dev->flags |= IFF_PROMISC;
3337 dev->promiscuity += inc;
3338 if (dev->promiscuity == 0) {
3339 /*
3340 * Avoid overflow.
3341 * If inc causes overflow, untouch promisc and return error.
3342 */
3343 if (inc < 0)
3344 dev->flags &= ~IFF_PROMISC;
3345 else {
3346 dev->promiscuity -= inc;
 3347			printk(KERN_WARNING "%s: promiscuity counter overflowed; "
 3348			       "promiscuity left unchanged, the promiscuous mode "
 3349			       "feature of this device may be broken.\n", dev->name);
3350 return -EOVERFLOW;
3351 }
3352 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003353 if (dev->flags != old_flags) {
3354 printk(KERN_INFO "device %s %s promiscuous mode\n",
3355 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3356 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003357 if (audit_enabled) {
3358 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003359 audit_log(current->audit_context, GFP_ATOMIC,
3360 AUDIT_ANOM_PROMISCUOUS,
3361 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3362 dev->name, (dev->flags & IFF_PROMISC),
3363 (old_flags & IFF_PROMISC),
3364 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003365 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003366 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003367 }
Patrick McHardy24023452007-07-14 18:51:31 -07003368
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003369 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003370 }
Wang Chendad9b332008-06-18 01:48:28 -07003371 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003372}
3373
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374/**
3375 * dev_set_promiscuity - update promiscuity count on a device
3376 * @dev: device
3377 * @inc: modifier
3378 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003379 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380 * remains above zero the interface remains promiscuous. Once it hits zero
3381 * the device reverts back to normal filtering operation. A negative inc
3382 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003383 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 */
Wang Chendad9b332008-06-18 01:48:28 -07003385int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386{
3387 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003388 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389
Wang Chendad9b332008-06-18 01:48:28 -07003390 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003391 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003392 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003393 if (dev->flags != old_flags)
3394 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003395 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396}
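
/*
 * Usage sketch: a capture path takes a promiscuity reference while
 * sniffing and releases it when done; the device leaves promiscuous
 * mode only once every holder has dropped its reference.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	...
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */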
3397
3398/**
3399 * dev_set_allmulti - update allmulti count on a device
3400 * @dev: device
3401 * @inc: modifier
3402 *
3403 * Add or remove reception of all multicast frames to a device. While the
 3404 *	count in the device remains above zero the interface remains listening
 3405 *	to all multicast frames. Once it hits zero the device reverts back to normal
3406 * filtering operation. A negative @inc value is used to drop the counter
3407 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003408 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409 */
3410
Wang Chendad9b332008-06-18 01:48:28 -07003411int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412{
3413 unsigned short old_flags = dev->flags;
3414
Patrick McHardy24023452007-07-14 18:51:31 -07003415 ASSERT_RTNL();
3416
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003418 dev->allmulti += inc;
3419 if (dev->allmulti == 0) {
3420 /*
3421 * Avoid overflow.
3422 * If inc causes overflow, untouch allmulti and return error.
3423 */
3424 if (inc < 0)
3425 dev->flags &= ~IFF_ALLMULTI;
3426 else {
3427 dev->allmulti -= inc;
 3428			printk(KERN_WARNING "%s: allmulti counter overflowed; "
 3429			       "allmulti left unchanged, the allmulti feature of "
 3430			       "this device may be broken.\n", dev->name);
3431 return -EOVERFLOW;
3432 }
3433 }
Patrick McHardy24023452007-07-14 18:51:31 -07003434 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003435 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003436 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003437 }
Wang Chendad9b332008-06-18 01:48:28 -07003438 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003439}
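
/*
 * Usage sketch: multicast consumers (routing daemons, tunnels) hold
 * an allmulti reference for as long as they need to see every
 * multicast frame.
 *
 *	rtnl_lock();
 *	err = dev_set_allmulti(dev, 1);
 *	...
 *	dev_set_allmulti(dev, -1);
 *	rtnl_unlock();
 */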
3440
3441/*
3442 * Upload unicast and multicast address lists to device and
3443 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003444 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003445 * are present.
3446 */
3447void __dev_set_rx_mode(struct net_device *dev)
3448{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003449 const struct net_device_ops *ops = dev->netdev_ops;
3450
Patrick McHardy4417da62007-06-27 01:28:10 -07003451 /* dev_open will call this function so the list will stay sane. */
3452 if (!(dev->flags&IFF_UP))
3453 return;
3454
3455 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003456 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003457
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003458 if (ops->ndo_set_rx_mode)
3459 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003460 else {
3461 /* Unicast addresses changes may only happen under the rtnl,
3462 * therefore calling __dev_set_promiscuity here is safe.
3463 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003464 if (dev->uc.count > 0 && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003465 __dev_set_promiscuity(dev, 1);
3466 dev->uc_promisc = 1;
Jiri Pirko31278e72009-06-17 01:12:19 +00003467 } else if (dev->uc.count == 0 && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003468 __dev_set_promiscuity(dev, -1);
3469 dev->uc_promisc = 0;
3470 }
3471
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003472 if (ops->ndo_set_multicast_list)
3473 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003474 }
3475}
3476
3477void dev_set_rx_mode(struct net_device *dev)
3478{
David S. Millerb9e40852008-07-15 00:15:08 -07003479 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003480 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003481 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482}
3483
Jiri Pirkof001fde2009-05-05 02:48:28 +00003484/* hw addresses list handling functions */
3485
Jiri Pirko31278e72009-06-17 01:12:19 +00003486static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3487 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003488{
3489 struct netdev_hw_addr *ha;
3490 int alloc_size;
3491
3492 if (addr_len > MAX_ADDR_LEN)
3493 return -EINVAL;
3494
Jiri Pirko31278e72009-06-17 01:12:19 +00003495 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003496 if (!memcmp(ha->addr, addr, addr_len) &&
3497 ha->type == addr_type) {
3498 ha->refcount++;
3499 return 0;
3500 }
3501 }
3502
3503
Jiri Pirkof001fde2009-05-05 02:48:28 +00003504 alloc_size = sizeof(*ha);
3505 if (alloc_size < L1_CACHE_BYTES)
3506 alloc_size = L1_CACHE_BYTES;
3507 ha = kmalloc(alloc_size, GFP_ATOMIC);
3508 if (!ha)
3509 return -ENOMEM;
3510 memcpy(ha->addr, addr, addr_len);
3511 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003512 ha->refcount = 1;
3513 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003514 list_add_tail_rcu(&ha->list, &list->list);
3515 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003516 return 0;
3517}
3518
3519static void ha_rcu_free(struct rcu_head *head)
3520{
3521 struct netdev_hw_addr *ha;
3522
3523 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3524 kfree(ha);
3525}
3526
Jiri Pirko31278e72009-06-17 01:12:19 +00003527static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3528 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003529{
3530 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003531
Jiri Pirko31278e72009-06-17 01:12:19 +00003532 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003533 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003534 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003535 if (--ha->refcount)
3536 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003537 list_del_rcu(&ha->list);
3538 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003539 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003540 return 0;
3541 }
3542 }
3543 return -ENOENT;
3544}
3545
Jiri Pirko31278e72009-06-17 01:12:19 +00003546static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3547 struct netdev_hw_addr_list *from_list,
3548 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003549 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003550{
3551 int err;
3552 struct netdev_hw_addr *ha, *ha2;
3553 unsigned char type;
3554
Jiri Pirko31278e72009-06-17 01:12:19 +00003555 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003556 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003557 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003558 if (err)
3559 goto unroll;
3560 }
3561 return 0;
3562
3563unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00003564 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003565 if (ha2 == ha)
3566 break;
3567 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003568 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003569 }
3570 return err;
3571}
3572
Jiri Pirko31278e72009-06-17 01:12:19 +00003573static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3574 struct netdev_hw_addr_list *from_list,
3575 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003576 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003577{
3578 struct netdev_hw_addr *ha;
3579 unsigned char type;
3580
Jiri Pirko31278e72009-06-17 01:12:19 +00003581 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003582 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003583		__hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003584 }
3585}
3586
Jiri Pirko31278e72009-06-17 01:12:19 +00003587static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3588 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003589 int addr_len)
3590{
3591 int err = 0;
3592 struct netdev_hw_addr *ha, *tmp;
3593
Jiri Pirko31278e72009-06-17 01:12:19 +00003594 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003595 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003596 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003597 addr_len, ha->type);
3598 if (err)
3599 break;
3600 ha->synced = true;
3601 ha->refcount++;
3602 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003603 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3604 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003605 }
3606 }
3607 return err;
3608}
3609
Jiri Pirko31278e72009-06-17 01:12:19 +00003610static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3611 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003612 int addr_len)
3613{
3614 struct netdev_hw_addr *ha, *tmp;
3615
Jiri Pirko31278e72009-06-17 01:12:19 +00003616 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003617 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003618 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003619 addr_len, ha->type);
3620 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003621 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003622 addr_len, ha->type);
3623 }
3624 }
3625}
3626
Jiri Pirko31278e72009-06-17 01:12:19 +00003627static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003628{
3629 struct netdev_hw_addr *ha, *tmp;
3630
Jiri Pirko31278e72009-06-17 01:12:19 +00003631 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003632 list_del_rcu(&ha->list);
3633 call_rcu(&ha->rcu_head, ha_rcu_free);
3634 }
Jiri Pirko31278e72009-06-17 01:12:19 +00003635 list->count = 0;
3636}
3637
3638static void __hw_addr_init(struct netdev_hw_addr_list *list)
3639{
3640 INIT_LIST_HEAD(&list->list);
3641 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003642}
3643
3644/* Device addresses handling functions */
3645
3646static void dev_addr_flush(struct net_device *dev)
3647{
3648 /* rtnl_mutex must be held here */
3649
Jiri Pirko31278e72009-06-17 01:12:19 +00003650 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003651 dev->dev_addr = NULL;
3652}
3653
3654static int dev_addr_init(struct net_device *dev)
3655{
3656 unsigned char addr[MAX_ADDR_LEN];
3657 struct netdev_hw_addr *ha;
3658 int err;
3659
3660 /* rtnl_mutex must be held here */
3661
Jiri Pirko31278e72009-06-17 01:12:19 +00003662 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00003663 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00003664 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003665 NETDEV_HW_ADDR_T_LAN);
3666 if (!err) {
3667 /*
3668 * Get the first (previously created) address from the list
3669 * and set dev_addr pointer to this location.
3670 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003671 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003672 struct netdev_hw_addr, list);
3673 dev->dev_addr = ha->addr;
3674 }
3675 return err;
3676}
3677
3678/**
3679 * dev_addr_add - Add a device address
3680 * @dev: device
3681 * @addr: address to add
3682 * @addr_type: address type
3683 *
3684 * Add a device address to the device or increase the reference count if
3685 * it already exists.
3686 *
3687 * The caller must hold the rtnl_mutex.
3688 */
3689int dev_addr_add(struct net_device *dev, unsigned char *addr,
3690 unsigned char addr_type)
3691{
3692 int err;
3693
3694 ASSERT_RTNL();
3695
Jiri Pirko31278e72009-06-17 01:12:19 +00003696 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003697 if (!err)
3698 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3699 return err;
3700}
3701EXPORT_SYMBOL(dev_addr_add);
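
/*
 * Usage sketch (address bytes illustrative): add a secondary LAN
 * address; adding the same address/type again only bumps the
 * refcount.
 *
 *	unsigned char mac[ETH_ALEN] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, mac, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 */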
3702
3703/**
3704 * dev_addr_del - Release a device address.
3705 * @dev: device
3706 * @addr: address to delete
3707 * @addr_type: address type
3708 *
3709 * Release reference to a device address and remove it from the device
3710 * if the reference count drops to zero.
3711 *
3712 * The caller must hold the rtnl_mutex.
3713 */
3714int dev_addr_del(struct net_device *dev, unsigned char *addr,
3715 unsigned char addr_type)
3716{
3717 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003718 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003719
3720 ASSERT_RTNL();
3721
Jiri Pirkoccffad252009-05-22 23:22:17 +00003722 /*
 3723	 * We cannot remove the first address from the list because
 3724	 * dev->dev_addr points to it.
3725 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003726 ha = list_first_entry(&dev->dev_addrs.list,
3727 struct netdev_hw_addr, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003728 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3729 return -ENOENT;
3730
Jiri Pirko31278e72009-06-17 01:12:19 +00003731 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003732 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003733 if (!err)
3734 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3735 return err;
3736}
3737EXPORT_SYMBOL(dev_addr_del);
3738
3739/**
3740 * dev_addr_add_multiple - Add device addresses from another device
3741 * @to_dev: device to which addresses will be added
3742 * @from_dev: device from which addresses will be added
 3743 *	@addr_type: address type - 0 means the type will be taken from from_dev
 3744 *
 3745 *	Add the device addresses of one device to another.
 3746 *
3747 * The caller must hold the rtnl_mutex.
3748 */
3749int dev_addr_add_multiple(struct net_device *to_dev,
3750 struct net_device *from_dev,
3751 unsigned char addr_type)
3752{
3753 int err;
3754
3755 ASSERT_RTNL();
3756
3757 if (from_dev->addr_len != to_dev->addr_len)
3758 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003759 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003760 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003761 if (!err)
3762 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3763 return err;
3764}
3765EXPORT_SYMBOL(dev_addr_add_multiple);
3766
3767/**
3768 * dev_addr_del_multiple - Delete device addresses by another device
3769 * @to_dev: device where the addresses will be deleted
 3770 *	@from_dev: device whose addresses are to be deleted from to_dev
 3771 *	@addr_type: address type - 0 means the type will be taken from from_dev
 3772 *
 3773 *	Deletes the addresses listed in from_dev from to_dev.
3774 *
3775 * The caller must hold the rtnl_mutex.
3776 */
3777int dev_addr_del_multiple(struct net_device *to_dev,
3778 struct net_device *from_dev,
3779 unsigned char addr_type)
3780{
3781 ASSERT_RTNL();
3782
3783 if (from_dev->addr_len != to_dev->addr_len)
3784 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003785 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003786 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003787 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3788 return 0;
3789}
3790EXPORT_SYMBOL(dev_addr_del_multiple);
3791
Jiri Pirko31278e72009-06-17 01:12:19 +00003792/* multicast addresses handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00003793
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003794int __dev_addr_delete(struct dev_addr_list **list, int *count,
3795 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003796{
3797 struct dev_addr_list *da;
3798
3799 for (; (da = *list) != NULL; list = &da->next) {
3800 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3801 alen == da->da_addrlen) {
3802 if (glbl) {
3803 int old_glbl = da->da_gusers;
3804 da->da_gusers = 0;
3805 if (old_glbl == 0)
3806 break;
3807 }
3808 if (--da->da_users)
3809 return 0;
3810
3811 *list = da->next;
3812 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003813 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003814 return 0;
3815 }
3816 }
3817 return -ENOENT;
3818}
3819
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003820int __dev_addr_add(struct dev_addr_list **list, int *count,
3821 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003822{
3823 struct dev_addr_list *da;
3824
3825 for (da = *list; da != NULL; da = da->next) {
3826 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3827 da->da_addrlen == alen) {
3828 if (glbl) {
3829 int old_glbl = da->da_gusers;
3830 da->da_gusers = 1;
3831 if (old_glbl)
3832 return 0;
3833 }
3834 da->da_users++;
3835 return 0;
3836 }
3837 }
3838
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003839 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003840 if (da == NULL)
3841 return -ENOMEM;
3842 memcpy(da->da_addr, addr, alen);
3843 da->da_addrlen = alen;
3844 da->da_users = 1;
3845 da->da_gusers = glbl ? 1 : 0;
3846 da->next = *list;
3847 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003848 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003849 return 0;
3850}
3851
Patrick McHardy4417da62007-06-27 01:28:10 -07003852/**
3853 * dev_unicast_delete - Release secondary unicast address.
3854 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003855 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07003856 *
3857 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003858 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003859 *
3860 * The caller must hold the rtnl_mutex.
3861 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003862int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003863{
3864 int err;
3865
3866 ASSERT_RTNL();
3867
Jiri Pirko31278e72009-06-17 01:12:19 +00003868 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3869 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003870 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003871 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003872 return err;
3873}
3874EXPORT_SYMBOL(dev_unicast_delete);
3875
3876/**
3877 * dev_unicast_add - add a secondary unicast address
3878 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003879 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07003880 *
3881 * Add a secondary unicast address to the device or increase
3882 * the reference count if it already exists.
3883 *
3884 * The caller must hold the rtnl_mutex.
3885 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003886int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003887{
3888 int err;
3889
3890 ASSERT_RTNL();
3891
Jiri Pirko31278e72009-06-17 01:12:19 +00003892 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
3893 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003894 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003895 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003896 return err;
3897}
3898EXPORT_SYMBOL(dev_unicast_add);
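
/*
 * Usage sketch: a macvlan-style upper device pushes its own MAC into
 * the lower device's unicast filter so its frames are accepted
 * without promiscuous mode (device names illustrative).
 *
 *	rtnl_lock();
 *	err = dev_unicast_add(lowerdev, upperdev->dev_addr);
 *	...
 *	dev_unicast_delete(lowerdev, upperdev->dev_addr);
 *	rtnl_unlock();
 */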
3899
Chris Leeche83a2ea2008-01-31 16:53:23 -08003900int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3901 struct dev_addr_list **from, int *from_count)
3902{
3903 struct dev_addr_list *da, *next;
3904 int err = 0;
3905
3906 da = *from;
3907 while (da != NULL) {
3908 next = da->next;
3909 if (!da->da_synced) {
3910 err = __dev_addr_add(to, to_count,
3911 da->da_addr, da->da_addrlen, 0);
3912 if (err < 0)
3913 break;
3914 da->da_synced = 1;
3915 da->da_users++;
3916 } else if (da->da_users == 1) {
3917 __dev_addr_delete(to, to_count,
3918 da->da_addr, da->da_addrlen, 0);
3919 __dev_addr_delete(from, from_count,
3920 da->da_addr, da->da_addrlen, 0);
3921 }
3922 da = next;
3923 }
3924 return err;
3925}
3926
3927void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3928 struct dev_addr_list **from, int *from_count)
3929{
3930 struct dev_addr_list *da, *next;
3931
3932 da = *from;
3933 while (da != NULL) {
3934 next = da->next;
3935 if (da->da_synced) {
3936 __dev_addr_delete(to, to_count,
3937 da->da_addr, da->da_addrlen, 0);
3938 da->da_synced = 0;
3939 __dev_addr_delete(from, from_count,
3940 da->da_addr, da->da_addrlen, 0);
3941 }
3942 da = next;
3943 }
3944}
3945
3946/**
3947 * dev_unicast_sync - Synchronize device's unicast list to another device
3948 * @to: destination device
3949 * @from: source device
3950 *
3951 * Add newly added addresses to the destination device and release
Jiri Pirkoccffad252009-05-22 23:22:17 +00003952 * addresses that have no users left.
Chris Leeche83a2ea2008-01-31 16:53:23 -08003953 *
3954 * This function is intended to be called from the dev->set_rx_mode
3955 * function of layered software devices.
3956 */
3957int dev_unicast_sync(struct net_device *to, struct net_device *from)
3958{
3959 int err = 0;
3960
Jiri Pirkoccffad252009-05-22 23:22:17 +00003961 ASSERT_RTNL();
3962
3963 if (to->addr_len != from->addr_len)
3964 return -EINVAL;
3965
Jiri Pirko31278e72009-06-17 01:12:19 +00003966 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003967 if (!err)
3968 __dev_set_rx_mode(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003969 return err;
3970}
3971EXPORT_SYMBOL(dev_unicast_sync);
3972
3973/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003974 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003975 * @to: destination device
3976 * @from: source device
3977 *
3978 * Remove all addresses that were added to the destination device by
3979 * dev_unicast_sync(). This function is intended to be called from the
3980 * dev->stop function of layered software devices.
3981 */
3982void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3983{
Jiri Pirkoccffad252009-05-22 23:22:17 +00003984 ASSERT_RTNL();
Chris Leeche83a2ea2008-01-31 16:53:23 -08003985
Jiri Pirkoccffad252009-05-22 23:22:17 +00003986 if (to->addr_len != from->addr_len)
3987 return;
3988
Jiri Pirko31278e72009-06-17 01:12:19 +00003989 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003990 __dev_set_rx_mode(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003991}
3992EXPORT_SYMBOL(dev_unicast_unsync);
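
/*
 * Usage sketch: a stacked device propagates its unicast list from its
 * rx-mode hook and tears it down on stop (vlan-style; the "upper"
 * helpers are hypothetical).
 *
 *	static void upper_set_rx_mode(struct net_device *dev)
 *	{
 *		dev_unicast_sync(upper_get_lowerdev(dev), dev);
 *	}
 *
 * and from the ndo_stop path:
 *
 *	dev_unicast_unsync(upper_get_lowerdev(dev), dev);
 */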
3993
Jiri Pirkoccffad252009-05-22 23:22:17 +00003994static void dev_unicast_flush(struct net_device *dev)
3995{
3996 /* rtnl_mutex must be held here */
3997
Jiri Pirko31278e72009-06-17 01:12:19 +00003998 __hw_addr_flush(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003999}
4000
4001static void dev_unicast_init(struct net_device *dev)
4002{
4003 /* rtnl_mutex must be held here */
4004
Jiri Pirko31278e72009-06-17 01:12:19 +00004005 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004006}
4007
4008
Denis Cheng12972622007-07-18 02:12:56 -07004009static void __dev_addr_discard(struct dev_addr_list **list)
4010{
4011 struct dev_addr_list *tmp;
4012
4013 while (*list != NULL) {
4014 tmp = *list;
4015 *list = tmp->next;
4016 if (tmp->da_users > tmp->da_gusers)
 4017			printk(KERN_ERR "__dev_addr_discard: address leakage! "
4018 "da_users=%d\n", tmp->da_users);
4019 kfree(tmp);
4020 }
4021}
4022
Denis Cheng26cc2522007-07-18 02:12:03 -07004023static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004024{
David S. Millerb9e40852008-07-15 00:15:08 -07004025 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004026
Denis Cheng456ad752007-07-18 02:10:54 -07004027 __dev_addr_discard(&dev->mc_list);
4028 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004029
David S. Millerb9e40852008-07-15 00:15:08 -07004030 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004031}
4032
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004033/**
4034 * dev_get_flags - get flags reported to userspace
4035 * @dev: device
4036 *
4037 * Get the combination of flag bits exported through APIs to userspace.
4038 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039unsigned dev_get_flags(const struct net_device *dev)
4040{
4041 unsigned flags;
4042
4043 flags = (dev->flags & ~(IFF_PROMISC |
4044 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004045 IFF_RUNNING |
4046 IFF_LOWER_UP |
4047 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004048 (dev->gflags & (IFF_PROMISC |
4049 IFF_ALLMULTI));
4050
Stefan Rompfb00055a2006-03-20 17:09:11 -08004051 if (netif_running(dev)) {
4052 if (netif_oper_up(dev))
4053 flags |= IFF_RUNNING;
4054 if (netif_carrier_ok(dev))
4055 flags |= IFF_LOWER_UP;
4056 if (netif_dormant(dev))
4057 flags |= IFF_DORMANT;
4058 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059
4060 return flags;
4061}
4062
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004063/**
4064 * dev_change_flags - change device settings
4065 * @dev: device
4066 * @flags: device state flags
4067 *
4068 * Change settings on device based state flags. The flags are
4069 * in the userspace exported format.
4070 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071int dev_change_flags(struct net_device *dev, unsigned flags)
4072{
Thomas Graf7c355f52007-06-05 16:03:03 -07004073 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074 int old_flags = dev->flags;
4075
Patrick McHardy24023452007-07-14 18:51:31 -07004076 ASSERT_RTNL();
4077
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078 /*
4079 * Set the flags on our device.
4080 */
4081
4082 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4083 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4084 IFF_AUTOMEDIA)) |
4085 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4086 IFF_ALLMULTI));
4087
4088 /*
4089 * Load in the correct multicast list now the flags have changed.
4090 */
4091
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004092 if ((old_flags ^ flags) & IFF_MULTICAST)
4093 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004094
Patrick McHardy4417da62007-06-27 01:28:10 -07004095 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096
4097 /*
 4098	 *	Have we downed the interface? We handle IFF_UP ourselves
4099 * according to user attempts to set it, rather than blindly
4100 * setting it.
4101 */
4102
4103 ret = 0;
4104 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4105 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4106
4107 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004108 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109 }
4110
4111 if (dev->flags & IFF_UP &&
4112 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4113 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004114 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004115
4116 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4117 int inc = (flags & IFF_PROMISC) ? +1 : -1;
4118 dev->gflags ^= IFF_PROMISC;
4119 dev_set_promiscuity(dev, inc);
4120 }
4121
4122 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 4123	   is important. Some (broken) drivers set IFF_PROMISC when
 4124	   IFF_ALLMULTI is requested, without asking us and without reporting it.
4125 */
4126 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4127 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
4128 dev->gflags ^= IFF_ALLMULTI;
4129 dev_set_allmulti(dev, inc);
4130 }
4131
Thomas Graf7c355f52007-06-05 16:03:03 -07004132 /* Exclude state transition flags, already notified */
4133 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4134 if (changes)
4135 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004136
4137 return ret;
4138}
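
/*
 * Usage sketch: bring an interface up the way the SIOCSIFFLAGS path
 * does, preserving the remaining flag bits.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);
 *	rtnl_unlock();
 */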
4139
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004140/**
4141 * dev_set_mtu - Change maximum transfer unit
4142 * @dev: device
4143 * @new_mtu: new transfer unit
4144 *
4145 * Change the maximum transfer size of the network device.
4146 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004147int dev_set_mtu(struct net_device *dev, int new_mtu)
4148{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004149 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004150 int err;
4151
4152 if (new_mtu == dev->mtu)
4153 return 0;
4154
4155 /* MTU must be positive. */
4156 if (new_mtu < 0)
4157 return -EINVAL;
4158
4159 if (!netif_device_present(dev))
4160 return -ENODEV;
4161
4162 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004163 if (ops->ndo_change_mtu)
4164 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165 else
4166 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004167
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004169 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004170 return err;
4171}
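
/*
 * Usage sketch: request jumbo frames; whether 9000 is acceptable is
 * decided by the driver's ndo_change_mtu.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */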
4172
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004173/**
4174 * dev_set_mac_address - Change Media Access Control Address
4175 * @dev: device
4176 * @sa: new address
4177 *
4178 * Change the hardware (MAC) address of the device
4179 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004180int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4181{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004182 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183 int err;
4184
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004185 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 return -EOPNOTSUPP;
4187 if (sa->sa_family != dev->type)
4188 return -EINVAL;
4189 if (!netif_device_present(dev))
4190 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004191 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004193 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 return err;
4195}
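
/*
 * Usage sketch (new_mac illustrative): the address family must match
 * dev->type, e.g. ARPHRD_ETHER for ethernet devices.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */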

/*
 *	Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
 */
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGIFFLAGS:	/* Get interface flags */
		ifr->ifr_flags = (short) dev_get_flags(dev);
		return 0;

	case SIOCGIFMETRIC:	/* Get the metric on the interface
				   (currently unused) */
		ifr->ifr_metric = 0;
		return 0;

	case SIOCGIFMTU:	/* Get the MTU of a device */
		ifr->ifr_mtu = dev->mtu;
		return 0;

	case SIOCGIFHWADDR:
		if (!dev->addr_len)
			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
		else
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		ifr->ifr_hwaddr.sa_family = dev->type;
		return 0;

	case SIOCGIFSLAVE:
		err = -EINVAL;
		break;

	case SIOCGIFMAP:
		ifr->ifr_map.mem_start = dev->mem_start;
		ifr->ifr_map.mem_end   = dev->mem_end;
		ifr->ifr_map.base_addr = dev->base_addr;
		ifr->ifr_map.irq       = dev->irq;
		ifr->ifr_map.dma       = dev->dma;
		ifr->ifr_map.port      = dev->if_port;
		return 0;

	case SIOCGIFINDEX:
		ifr->ifr_ifindex = dev->ifindex;
		return 0;

	case SIOCGIFTXQLEN:
		ifr->ifr_qlen = dev->tx_queue_len;
		return 0;

	default:
		/* dev_ioctl() should ensure this case
		 * is never reached
		 */
		WARN_ON(1);
		err = -EINVAL;
		break;

	}
	return err;
}
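
/*
 * Example (user-space sketch, hypothetical and not part of this file):
 * the read-only commands above are reached through the ioctl(2) syscall
 * on any socket, e.g. to fetch the MTU of "eth0":
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("mtu = %d\n", ifr.ifr_mtu);
 */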

/*
 *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	const struct net_device_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = dev->netdev_ops;

	switch (cmd) {
	case SIOCSIFFLAGS:	/* Set interface flags */
		return dev_change_flags(dev, ifr->ifr_flags);

	case SIOCSIFMETRIC:	/* Set the metric on the interface
				   (currently unused) */
		return -EOPNOTSUPP;

	case SIOCSIFMTU:	/* Set the MTU of a device */
		return dev_set_mtu(dev, ifr->ifr_mtu);

	case SIOCSIFHWADDR:
		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

	case SIOCSIFHWBROADCAST:
		if (ifr->ifr_hwaddr.sa_family != dev->type)
			return -EINVAL;
		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
		return 0;

	case SIOCSIFMAP:
		if (ops->ndo_set_config) {
			if (!netif_device_present(dev))
				return -ENODEV;
			return ops->ndo_set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;

	case SIOCADDMULTI:
		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
				  dev->addr_len, 1);

	case SIOCDELMULTI:
		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
				     dev->addr_len, 1);

	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;

	case SIOCSIFNAME:
		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
		return dev_change_name(dev, ifr->ifr_newname);

	/*
	 *	Unknown or private ioctl
	 */

	default:
		if ((cmd >= SIOCDEVPRIVATE &&
		    cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCSHWTSTAMP ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (ops->ndo_do_ioctl) {
				if (netif_device_present(dev))
					err = ops->ndo_do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;

	}
	return err;
}
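
/*
 * Example (user-space sketch, hypothetical): the setter commands above
 * require CAP_NET_ADMIN and reach this function under rtnl_lock(), e.g.
 * changing the transmit queue length of "eth0":
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_qlen = 1000;
 *	ioctl(fd, SIOCSIFTXQLEN, &ifr);
 */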

/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl - network device ioctl
 *	@net: the applicable net namespace
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */

int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf(net, (char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname(net, (struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
	/*
	 *	These ioctl calls:
	 *	- can be done by all.
	 *	- atomic and do not require locking.
	 *	- return a value
	 */
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
		read_lock(&dev_base_lock);
		ret = dev_ifsioc_locked(net, &ifr, cmd);
		read_unlock(&dev_base_lock);
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCETHTOOL:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(net, &ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- return a value
	 */
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- do not return a value
	 */
	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFMAP:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSIFTXQLEN:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSHWTSTAMP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
		/* Get the per device memory space. We can add this but
		 * currently do not support it */
	case SIOCSIFMEM:
		/* Set the per device memory buffer space.
		 * Not applicable in our case */
	case SIOCSIFLINK:
		return -EINVAL;

	/*
	 *	Unknown or private ioctl.
	 */
	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15)) {
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(net, &ifr, cmd);
			rtnl_unlock();
			if (!ret && copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
				ret = -EFAULT;
			return ret;
		}
		/* Take care of Wireless Extensions */
		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
			return wext_handle_ioctl(net, &ifr, cmd, arg);
		return -EINVAL;
	}
}


/**
 *	dev_new_index - allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}

static void rollback_registered(struct net_device *dev)
{
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	/* Some devices call without registering for initialization unwind. */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
				  "was registered\n", dev->name, dev);

		WARN_ON(1);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_REGISTERED);

	/* If device is running, close it first. */
	dev_close(dev);

	/* And unlink it from device chain. */
	unlist_netdevice(dev);

	dev->reg_state = NETREG_UNREGISTERING;

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	   this device. They should clean all the things.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_unicast_flush(dev);
	dev_addr_discard(dev);

	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);

	/* Notifier chain MUST detach us from master device. */
	WARN_ON(dev->master);

	/* Remove entries from kobject tree */
	netdev_unregister_kobject(dev);

	synchronize_net();

	dev_put(dev);
}

static void __netdev_init_queue_locks_one(struct net_device *dev,
					  struct netdev_queue *dev_queue,
					  void *_unused)
{
	spin_lock_init(&dev_queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
	dev_queue->xmit_lock_owner = -1;
}

static void netdev_init_queue_locks(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
	__netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
}

unsigned long netdev_fix_features(unsigned long features, const char *name)
{
	/* Fix illegal SG+CSUM combinations. */
	if ((features & NETIF_F_SG) &&
	    !(features & NETIF_F_ALL_CSUM)) {
		if (name)
			printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
			       "checksum feature.\n", name);
		features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
		if (name)
			printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
			       "SG feature.\n", name);
		features &= ~NETIF_F_TSO;
	}

	if (features & NETIF_F_UFO) {
		if (!(features & NETIF_F_GEN_CSUM)) {
			if (name)
				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
				       "since no NETIF_F_HW_CSUM feature.\n",
				       name);
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			if (name)
				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
				       "since no NETIF_F_SG feature.\n", name);
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}
EXPORT_SYMBOL(netdev_fix_features);
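
/*
 * Example (illustrative, not from the original source): a driver that
 * requests TSO without any checksum capability sees the invalid bits
 * stripped; here both NETIF_F_SG and NETIF_F_TSO end up dropped because
 * no NETIF_F_*_CSUM flag is set:
 *
 *	dev->features = netdev_fix_features(NETIF_F_SG | NETIF_F_TSO,
 *					    dev->name);
 */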

/**
 *	register_netdevice - register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	struct hlist_head *head;
	struct hlist_node *p;
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);
	netdev_init_queue_locks(dev);

	dev->iflink = -1;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (!dev_valid_name(dev->name)) {
		ret = -EINVAL;
		goto err_uninit;
	}

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Check for existence of name */
	head = dev_name_hash(net, dev->name);
	hlist_for_each(p, head) {
		struct net_device *d
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
			ret = -EEXIST;
			goto err_uninit;
		}
	}

	/* Fix illegal checksum combinations */
	if ((dev->features & NETIF_F_HW_CSUM) &&
	    (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	if ((dev->features & NETIF_F_NO_CSUM) &&
	    (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
	}

	dev->features = netdev_fix_features(dev->features, dev->name);

	/* Enable software GSO if SG is supported. */
	if (dev->features & NETIF_F_SG)
		dev->features |= NETIF_F_GSO;

	netdev_initialize_kobject(dev);
	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}

/**
 *	init_dummy_netdev - init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	set of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* initialize the ref count */
	atomic_set(&dev->refcnt, 1);

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
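
/*
 * Example (illustrative sketch): a driver multiplexing several hardware
 * channels onto one NAPI context. "my_poll" is a hypothetical poll
 * callback, not defined in this file:
 *
 *	static struct net_device dummy_dev;
 *	static struct napi_struct my_napi;
 *
 *	init_dummy_netdev(&dummy_dev);
 *	netif_napi_add(&dummy_dev, &my_napi, my_poll, 64);
 *	napi_enable(&my_napi);
 */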


/**
 *	register_netdev - register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();

	/*
	 * If the name is a format string the caller wants us to do a
	 * name allocation.
	 */
	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto out;
	}

	err = register_netdevice(dev);
out:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
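
/*
 * Example (illustrative sketch): the usual probe-time pattern. The ops
 * structure and private struct are hypothetical driver-side names:
 *
 *	dev = alloc_etherdev(sizeof(struct my_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */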

/*
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;

	rebroadcast_time = warning_time = jiffies;
	while (atomic_read(&dev->refcnt) != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, atomic_read(&dev->refcnt));
			warning_time = jiffies;
		}
	}
}
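
/*
 * Example (illustrative sketch): the contract netdev_wait_allrefs()
 * relies on. A subsystem that holds a device reference registers a
 * notifier and drops the reference on NETDEV_UNREGISTER; "my_nb" and
 * "my_netdev_event" are hypothetical names:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UNREGISTER)
 *			dev_put(dev);
 *		return NOTIFY_DONE;
 *	}
 *
 *	register_netdevice_notifier(&my_nb);
 */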

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_entry(list.next, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(atomic_read(&dev->refcnt));
		WARN_ON(dev->ip_ptr);
		WARN_ON(dev->ip6_ptr);
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/**
 *	dev_get_stats - get network device statistics
 *	@dev: device to get statistics from
 *
 *	Get network statistics from device. The device driver may provide
 *	its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
 *	the internal statistics structure is used.
 */
const struct net_device_stats *dev_get_stats(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats)
		return ops->ndo_get_stats(dev);
	else {
		unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
		struct net_device_stats *stats = &dev->stats;
		unsigned int i;
		struct netdev_queue *txq;

		for (i = 0; i < dev->num_tx_queues; i++) {
			txq = netdev_get_tx_queue(dev, i);
			tx_bytes   += txq->tx_bytes;
			tx_packets += txq->tx_packets;
			tx_dropped += txq->tx_dropped;
		}
		if (tx_bytes || tx_packets || tx_dropped) {
			stats->tx_bytes   = tx_bytes;
			stats->tx_packets = tx_packets;
			stats->tx_dropped = tx_dropped;
		}
		return stats;
	}
}
EXPORT_SYMBOL(dev_get_stats);
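
/*
 * Example (illustrative, not from the original source): a reader of the
 * aggregated counters, assuming the caller holds a reference on dev:
 *
 *	const struct net_device_stats *stats = dev_get_stats(dev);
 *
 *	printk(KERN_INFO "%s: %lu packets transmitted\n",
 *	       dev->name, stats->tx_packets);
 */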

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue,
				  void *_unused)
{
	queue->dev = dev;
}

static void netdev_init_queues(struct net_device *dev)
{
	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);
}

/**
 *	alloc_netdev_mq - allocate network device
 *	@sizeof_priv: size of private data to allocate space for
 *	@name: device name format string
 *	@setup: callback to initialize device
 *	@queue_count: the number of subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization.  Also allocates subqueue structs
 *	for each queue on the device at the end of the netdevice.
 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct netdev_queue *tx;
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "tx qdiscs.\n");
		goto free_p;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	if (dev_addr_init(dev))
		goto free_tx;

	dev_unicast_init(dev);

	dev_net_set(dev, &init_net);

	dev->_tx = tx;
	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

	dev->gso_max_size = GSO_MAX_SIZE;

	netdev_init_queues(dev);

	INIT_LIST_HEAD(&dev->napi_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);
	strcpy(dev->name, name);
	return dev;

free_tx:
	kfree(tx);

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mq);
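
/*
 * Example (illustrative sketch): allocating an eight-queue device. The
 * setup callback, private struct and name format are hypothetical:
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "mq%d",
 *			      my_setup, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */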

/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}

/**
 *	synchronize_net - Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}

/**
 *	unregister_netdevice - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	rollback_registered(dev);
	/* Finish processing unregister after unlock */
	net_set_todo(dev);
}

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}

EXPORT_SYMBOL(unregister_netdev);
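
/*
 * Example (illustrative sketch): the teardown mirror of register_netdev().
 * unregister_netdev() takes RTNL itself; free_netdev() must come after:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */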

/**
 *	dev_change_net_namespace - move device to different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	char buf[IFNAMSIZ];
	const char *destname;
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

#ifdef CONFIG_SYSFS
	/* Don't allow real devices to be moved when sysfs
	 * is enabled.
	 */
	err = -EINVAL;
	if (dev->dev.parent)
		goto out;
#endif

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	destname = dev->name;
	if (__dev_get_by_name(net, destname)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (!dev_valid_name(pat))
			goto out;
		if (strchr(pat, '%')) {
			if (__dev_alloc_name(net, pat, buf) < 0)
				goto out;
			destname = buf;
		} else
			destname = pat;
		if (__dev_get_by_name(net, destname))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and unregister_netdevice.
	 */

	/* If device is running, close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	   this device. They should clean all the things.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_unicast_flush(dev);
	dev_addr_discard(dev);

	netdev_unregister_kobject(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* Assign the new device name */
	if (destname != dev->name)
		strcpy(dev->name, destname);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = netdev_register_kobject(dev);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	synchronize_net();
	err = 0;
out:
	return err;
}
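
/*
 * Example (illustrative sketch): moving a device into another namespace
 * under RTNL, falling back to an "eth%d"-style name on collision; "net"
 * is assumed to be a valid struct net obtained elsewhere:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, net, "eth%d");
 *	rtnl_unlock();
 */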

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct Qdisc **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}

/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
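
/*
 * Example (illustrative sketch, loosely modelled on how a master/slave
 * driver might fold slave capabilities into a master device; the slave
 * list and field names are hypothetical):
 *
 *	features &= ~NETIF_F_ONE_FOR_ALL;
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     NETIF_F_ONE_FOR_ALL);
 */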

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *	@buffer: buffer for resulting name
 *	@len: size of buffer
 *
 *	Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
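
/*
 * Example (illustrative, not from the original source): formatting the
 * driver name for a diagnostic message:
 *
 *	char drv[64];
 *
 *	printk(KERN_DEBUG "%s is driven by %s\n", dev->name,
 *	       netdev_drivername(dev, drv, sizeof(drv)));
 */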

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
restart:
	for_each_netdev(net, dev) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Delete virtual devices */
		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
			dev->rtnl_link_ops->dellink(dev);
			goto restart;
		}

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
		goto restart;
	}
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single-threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
		queue->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device the first device on the list of
	 * network devices, so that it is the first device that appears
	 * and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);

EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

EXPORT_SYMBOL(dev_load);

EXPORT_PER_CPU_SYMBOL(softnet_data);