blob: 576a61574a936fcab9a7608d2d83c677127aebb6 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
Jesper Juhl02c30a82005-05-05 16:16:16 -070010 * Authors: Ross Biro
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
21 *
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
73 */
74
75#include <asm/uaccess.h>
76#include <asm/system.h>
77#include <linux/bitops.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080078#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070079#include <linux/cpu.h>
80#include <linux/types.h>
81#include <linux/kernel.h>
82#include <linux/sched.h>
Arjan van de Ven4a3e2f72006-03-20 22:33:17 -080083#include <linux/mutex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070084#include <linux/string.h>
85#include <linux/mm.h>
86#include <linux/socket.h>
87#include <linux/sockios.h>
88#include <linux/errno.h>
89#include <linux/interrupt.h>
90#include <linux/if_ether.h>
91#include <linux/netdevice.h>
92#include <linux/etherdevice.h>
Ben Hutchings0187bdf2008-06-19 16:15:47 -070093#include <linux/ethtool.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070094#include <linux/notifier.h>
95#include <linux/skbuff.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020096#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070097#include <net/sock.h>
98#include <linux/rtnetlink.h>
99#include <linux/proc_fs.h>
100#include <linux/seq_file.h>
101#include <linux/stat.h>
102#include <linux/if_bridge.h>
Patrick McHardyb863ceb2007-07-14 18:55:06 -0700103#include <linux/if_macvlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104#include <net/dst.h>
105#include <net/pkt_sched.h>
106#include <net/checksum.h>
107#include <linux/highmem.h>
108#include <linux/init.h>
109#include <linux/kmod.h>
110#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111#include <linux/netpoll.h>
112#include <linux/rcupdate.h>
113#include <linux/delay.h>
Johannes Berg295f4a12007-04-26 20:43:56 -0700114#include <net/wext.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115#include <net/iw_handler.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116#include <asm/current.h>
Steve Grubb5bdb9882005-12-03 08:39:35 -0500117#include <linux/audit.h>
Chris Leechdb217332006-06-17 21:24:58 -0700118#include <linux/dmaengine.h>
Herbert Xuf6a78bf2006-06-22 02:57:17 -0700119#include <linux/err.h>
David S. Millerc7fa9d12006-08-15 16:34:13 -0700120#include <linux/ctype.h>
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700121#include <linux/if_arp.h>
Ben Hutchings6de329e2008-06-16 17:02:28 -0700122#include <linux/if_vlan.h>
David S. Miller8f0f2222008-07-15 03:47:03 -0700123#include <linux/ip.h>
Alexander Duyckad55dca2008-09-20 22:05:50 -0700124#include <net/ip.h>
David S. Miller8f0f2222008-07-15 03:47:03 -0700125#include <linux/ipv6.h>
126#include <linux/in.h>
David S. Millerb6b2fed2008-07-21 09:48:06 -0700127#include <linux/jhash.h>
128#include <linux/random.h>
David S. Miller9cbc1cb2009-06-15 03:02:23 -0700129#include <trace/events/napi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130
Pavel Emelyanov342709e2007-10-23 21:14:45 -0700131#include "net-sysfs.h"
132
Herbert Xud565b0a2008-12-15 23:38:52 -0800133/* Instead of increasing this, you should create a hash table. */
134#define MAX_GRO_SKBS 8
135
Herbert Xu5d38a072009-01-04 16:13:40 -0800136/* This should be increased if a protocol with a bigger head is added. */
137#define GRO_MAX_HEAD (MAX_HEADER + 128)
138
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139/*
140 * The list of packet types we will receive (as opposed to discard)
141 * and the routines to invoke.
142 *
143 * Why 16. Because with 16 the only overlap we get on a hash of the
144 * low nibble of the protocol value is RARP/SNAP/X.25.
145 *
146 * NOTE: That is no longer true with the addition of VLAN tags. Not
147 * sure which should go first, but I bet it won't make much
148 * difference if we are running VLANs. The good news is that
149 * this protocol won't be in the list unless compiled in, so
Stephen Hemminger3041a062006-05-26 13:25:24 -0700150 * the average user (w/out VLANs) will not be adversely affected.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151 * --BLG
152 *
153 * 0800 IP
154 * 8100 802.1Q VLAN
155 * 0001 802.3
156 * 0002 AX.25
157 * 0004 802.2
158 * 8035 RARP
159 * 0005 SNAP
160 * 0805 X.25
161 * 0806 ARP
162 * 8137 IPX
163 * 0009 Localtalk
164 * 86DD IPv6
165 */
166
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800167#define PTYPE_HASH_SIZE (16)
168#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
169
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170static DEFINE_SPINLOCK(ptype_lock);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800171static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -0700172static struct list_head ptype_all __read_mostly; /* Taps */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174/*
Pavel Emelianov7562f872007-05-03 15:13:45 -0700175 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176 * semaphore.
177 *
178 * Pure readers hold dev_base_lock for reading.
179 *
180 * Writers must hold the rtnl semaphore while they loop through the
Pavel Emelianov7562f872007-05-03 15:13:45 -0700181 * dev_base_head list, and hold dev_base_lock for writing when they do the
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 * actual updates. This allows pure readers to access the list even
183 * while a writer is preparing to update it.
184 *
185 * To put it another way, dev_base_lock is held for writing only to
186 * protect against pure readers; the rtnl semaphore provides the
187 * protection against other writers.
188 *
189 * See, for example usages, register_netdevice() and
190 * unregister_netdevice(), which must be called with the rtnl
191 * semaphore held.
192 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193DEFINE_RWLOCK(dev_base_lock);
194
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195EXPORT_SYMBOL(dev_base_lock);
196
197#define NETDEV_HASHBITS 8
Eric W. Biederman881d9662007-09-17 11:56:21 -0700198#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
Eric W. Biederman881d9662007-09-17 11:56:21 -0700200static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201{
202 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
Eric W. Biederman881d9662007-09-17 11:56:21 -0700203 return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204}
205
Eric W. Biederman881d9662007-09-17 11:56:21 -0700206static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207{
Eric W. Biederman881d9662007-09-17 11:56:21 -0700208 return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209}
210
Eric W. Biedermance286d32007-09-12 13:53:49 +0200211/* Device list insertion */
212static int list_netdevice(struct net_device *dev)
213{
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900214 struct net *net = dev_net(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +0200215
216 ASSERT_RTNL();
217
218 write_lock_bh(&dev_base_lock);
219 list_add_tail(&dev->dev_list, &net->dev_base_head);
220 hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
221 hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
222 write_unlock_bh(&dev_base_lock);
223 return 0;
224}
225
226/* Device list removal */
227static void unlist_netdevice(struct net_device *dev)
228{
229 ASSERT_RTNL();
230
231 /* Unlink dev from the device chain */
232 write_lock_bh(&dev_base_lock);
233 list_del(&dev->dev_list);
234 hlist_del(&dev->name_hlist);
235 hlist_del(&dev->index_hlist);
236 write_unlock_bh(&dev_base_lock);
237}
238
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239/*
240 * Our notifier list
241 */
242
Alan Sternf07d5b92006-05-09 15:23:03 -0700243static RAW_NOTIFIER_HEAD(netdev_chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244
245/*
246 * Device drivers call our routines to queue packets here. We empty the
247 * queue in the local softnet handler.
248 */
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700249
250DEFINE_PER_CPU(struct softnet_data, softnet_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251
David S. Millercf508b12008-07-22 14:16:42 -0700252#ifdef CONFIG_LOCKDEP
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700253/*
David S. Millerc773e842008-07-08 23:13:53 -0700254 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700255 * according to dev->type
256 */
257static const unsigned short netdev_lock_type[] =
258 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
259 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
260 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
261 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
262 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
263 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
264 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
265 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
266 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
267 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
268 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
269 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
270 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
Rémi Denis-Courmont2d91d782008-12-17 15:47:29 -0800271 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
Sergey Lapinfcb94e42009-06-08 12:18:47 +0000272 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY,
273 ARPHRD_VOID, ARPHRD_NONE};
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700274
275static const char *netdev_lock_name[] =
276 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
277 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
278 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
279 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
280 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
281 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
282 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
283 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
284 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
285 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
286 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
287 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
288 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
Rémi Denis-Courmont2d91d782008-12-17 15:47:29 -0800289 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
Sergey Lapinfcb94e42009-06-08 12:18:47 +0000290 "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY",
291 "_xmit_VOID", "_xmit_NONE"};
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700292
293static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
David S. Millercf508b12008-07-22 14:16:42 -0700294static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700295
296static inline unsigned short netdev_lock_pos(unsigned short dev_type)
297{
298 int i;
299
300 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
301 if (netdev_lock_type[i] == dev_type)
302 return i;
303 /* the last key is used by default */
304 return ARRAY_SIZE(netdev_lock_type) - 1;
305}
306
David S. Millercf508b12008-07-22 14:16:42 -0700307static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
308 unsigned short dev_type)
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700309{
310 int i;
311
312 i = netdev_lock_pos(dev_type);
313 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
314 netdev_lock_name[i]);
315}
David S. Millercf508b12008-07-22 14:16:42 -0700316
317static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
318{
319 int i;
320
321 i = netdev_lock_pos(dev->type);
322 lockdep_set_class_and_name(&dev->addr_list_lock,
323 &netdev_addr_lock_key[i],
324 netdev_lock_name[i]);
325}
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700326#else
David S. Millercf508b12008-07-22 14:16:42 -0700327static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
328 unsigned short dev_type)
329{
330}
331static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
Jarek Poplawski723e98b2007-05-15 22:46:18 -0700332{
333}
334#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335
336/*******************************************************************************
337
338 Protocol management and registration routines
339
340*******************************************************************************/
341
342/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343 * Add a protocol ID to the list. Now that the input handler is
344 * smarter we can dispense with all the messy stuff that used to be
345 * here.
346 *
347 * BEWARE!!! Protocol handlers, mangling input packets,
348 * MUST BE last in hash buckets and checking protocol handlers
349 * MUST start from promiscuous ptype_all chain in net_bh.
350 * It is true now, do not change it.
351 * Explanation follows: if protocol handler, mangling packet, will
352 * be the first on list, it is not able to sense, that packet
353 * is cloned and should be copied-on-write, so that it will
354 * change it and subsequent readers will get broken packet.
355 * --ANK (980803)
356 */
357
358/**
359 * dev_add_pack - add packet handler
360 * @pt: packet type declaration
361 *
362 * Add a protocol handler to the networking stack. The passed &packet_type
363 * is linked into kernel lists and may not be freed until it has been
364 * removed from the kernel lists.
365 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900366 * This call does not sleep therefore it can not
Linus Torvalds1da177e2005-04-16 15:20:36 -0700367 * guarantee all CPU's that are in middle of receiving packets
368 * will see the new packet type (until the next received packet).
369 */
370
371void dev_add_pack(struct packet_type *pt)
372{
373 int hash;
374
375 spin_lock_bh(&ptype_lock);
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -0700376 if (pt->type == htons(ETH_P_ALL))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700377 list_add_rcu(&pt->list, &ptype_all);
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -0700378 else {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800379 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380 list_add_rcu(&pt->list, &ptype_base[hash]);
381 }
382 spin_unlock_bh(&ptype_lock);
383}
384
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385/**
386 * __dev_remove_pack - remove packet handler
387 * @pt: packet type declaration
388 *
389 * Remove a protocol handler that was previously added to the kernel
390 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
391 * from the kernel lists and can be freed or reused once this function
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900392 * returns.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393 *
394 * The packet type might still be in use by receivers
395 * and must not be freed until after all the CPU's have gone
396 * through a quiescent state.
397 */
398void __dev_remove_pack(struct packet_type *pt)
399{
400 struct list_head *head;
401 struct packet_type *pt1;
402
403 spin_lock_bh(&ptype_lock);
404
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -0700405 if (pt->type == htons(ETH_P_ALL))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406 head = &ptype_all;
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -0700407 else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +0800408 head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409
410 list_for_each_entry(pt1, head, list) {
411 if (pt == pt1) {
412 list_del_rcu(&pt->list);
413 goto out;
414 }
415 }
416
417 printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
418out:
419 spin_unlock_bh(&ptype_lock);
420}
421/**
422 * dev_remove_pack - remove packet handler
423 * @pt: packet type declaration
424 *
425 * Remove a protocol handler that was previously added to the kernel
426 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
427 * from the kernel lists and can be freed or reused once this function
428 * returns.
429 *
430 * This call sleeps to guarantee that no CPU is looking at the packet
431 * type after return.
432 */
433void dev_remove_pack(struct packet_type *pt)
434{
435 __dev_remove_pack(pt);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900436
Linus Torvalds1da177e2005-04-16 15:20:36 -0700437 synchronize_net();
438}
439
440/******************************************************************************
441
442 Device Boot-time Settings Routines
443
444*******************************************************************************/
445
446/* Boot time configuration table */
447static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
448
449/**
450 * netdev_boot_setup_add - add new setup entry
451 * @name: name of the device
452 * @map: configured settings for the device
453 *
454 * Adds new setup entry to the dev_boot_setup list. The function
455 * returns 0 on error and 1 on success. This is a generic routine to
456 * all netdevices.
457 */
458static int netdev_boot_setup_add(char *name, struct ifmap *map)
459{
460 struct netdev_boot_setup *s;
461 int i;
462
463 s = dev_boot_setup;
464 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
465 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
466 memset(s[i].name, 0, sizeof(s[i].name));
Wang Chen93b3cff2008-07-01 19:57:19 -0700467 strlcpy(s[i].name, name, IFNAMSIZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700468 memcpy(&s[i].map, map, sizeof(s[i].map));
469 break;
470 }
471 }
472
473 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
474}
475
476/**
477 * netdev_boot_setup_check - check boot time settings
478 * @dev: the netdevice
479 *
480 * Check boot time settings for the device.
481 * The found settings are set for the device to be used
482 * later in the device probing.
483 * Returns 0 if no settings found, 1 if they are.
484 */
485int netdev_boot_setup_check(struct net_device *dev)
486{
487 struct netdev_boot_setup *s = dev_boot_setup;
488 int i;
489
490 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
491 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
Wang Chen93b3cff2008-07-01 19:57:19 -0700492 !strcmp(dev->name, s[i].name)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493 dev->irq = s[i].map.irq;
494 dev->base_addr = s[i].map.base_addr;
495 dev->mem_start = s[i].map.mem_start;
496 dev->mem_end = s[i].map.mem_end;
497 return 1;
498 }
499 }
500 return 0;
501}
502
503
504/**
505 * netdev_boot_base - get address from boot time settings
506 * @prefix: prefix for network device
507 * @unit: id for network device
508 *
509 * Check boot time settings for the base address of device.
510 * The found settings are set for the device to be used
511 * later in the device probing.
512 * Returns 0 if no settings found.
513 */
514unsigned long netdev_boot_base(const char *prefix, int unit)
515{
516 const struct netdev_boot_setup *s = dev_boot_setup;
517 char name[IFNAMSIZ];
518 int i;
519
520 sprintf(name, "%s%d", prefix, unit);
521
522 /*
523 * If device already registered then return base of 1
524 * to indicate not to probe for this interface
525 */
Eric W. Biederman881d9662007-09-17 11:56:21 -0700526 if (__dev_get_by_name(&init_net, name))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 return 1;
528
529 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
530 if (!strcmp(name, s[i].name))
531 return s[i].map.base_addr;
532 return 0;
533}
534
535/*
536 * Saves at boot time configured settings for any netdevice.
537 */
538int __init netdev_boot_setup(char *str)
539{
540 int ints[5];
541 struct ifmap map;
542
543 str = get_options(str, ARRAY_SIZE(ints), ints);
544 if (!str || !*str)
545 return 0;
546
547 /* Save settings */
548 memset(&map, 0, sizeof(map));
549 if (ints[0] > 0)
550 map.irq = ints[1];
551 if (ints[0] > 1)
552 map.base_addr = ints[2];
553 if (ints[0] > 2)
554 map.mem_start = ints[3];
555 if (ints[0] > 3)
556 map.mem_end = ints[4];
557
558 /* Add new entry to the list */
559 return netdev_boot_setup_add(str, &map);
560}
561
562__setup("netdev=", netdev_boot_setup);
563
564/*******************************************************************************
565
566 Device Interface Subroutines
567
568*******************************************************************************/
569
570/**
571 * __dev_get_by_name - find a device by its name
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700572 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573 * @name: name to find
574 *
575 * Find an interface by name. Must be called under RTNL semaphore
576 * or @dev_base_lock. If the name is found a pointer to the device
577 * is returned. If the name is not found then %NULL is returned. The
578 * reference counters are not incremented so the caller must be
579 * careful with locks.
580 */
581
Eric W. Biederman881d9662007-09-17 11:56:21 -0700582struct net_device *__dev_get_by_name(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700583{
584 struct hlist_node *p;
585
Eric W. Biederman881d9662007-09-17 11:56:21 -0700586 hlist_for_each(p, dev_name_hash(net, name)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587 struct net_device *dev
588 = hlist_entry(p, struct net_device, name_hlist);
589 if (!strncmp(dev->name, name, IFNAMSIZ))
590 return dev;
591 }
592 return NULL;
593}
594
595/**
596 * dev_get_by_name - find a device by its name
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700597 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700598 * @name: name to find
599 *
600 * Find an interface by name. This can be called from any
601 * context and does its own locking. The returned handle has
602 * the usage count incremented and the caller must use dev_put() to
603 * release it when it is no longer needed. %NULL is returned if no
604 * matching device is found.
605 */
606
Eric W. Biederman881d9662007-09-17 11:56:21 -0700607struct net_device *dev_get_by_name(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608{
609 struct net_device *dev;
610
611 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -0700612 dev = __dev_get_by_name(net, name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613 if (dev)
614 dev_hold(dev);
615 read_unlock(&dev_base_lock);
616 return dev;
617}
618
619/**
620 * __dev_get_by_index - find a device by its ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700621 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622 * @ifindex: index of device
623 *
624 * Search for an interface by index. Returns %NULL if the device
625 * is not found or a pointer to the device. The device has not
626 * had its reference counter increased so the caller must be careful
627 * about locking. The caller must hold either the RTNL semaphore
628 * or @dev_base_lock.
629 */
630
Eric W. Biederman881d9662007-09-17 11:56:21 -0700631struct net_device *__dev_get_by_index(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632{
633 struct hlist_node *p;
634
Eric W. Biederman881d9662007-09-17 11:56:21 -0700635 hlist_for_each(p, dev_index_hash(net, ifindex)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636 struct net_device *dev
637 = hlist_entry(p, struct net_device, index_hlist);
638 if (dev->ifindex == ifindex)
639 return dev;
640 }
641 return NULL;
642}
643
644
645/**
646 * dev_get_by_index - find a device by its ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700647 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700648 * @ifindex: index of device
649 *
650 * Search for an interface by index. Returns NULL if the device
651 * is not found or a pointer to the device. The device returned has
652 * had a reference added and the pointer is safe until the user calls
653 * dev_put to indicate they have finished with it.
654 */
655
Eric W. Biederman881d9662007-09-17 11:56:21 -0700656struct net_device *dev_get_by_index(struct net *net, int ifindex)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657{
658 struct net_device *dev;
659
660 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -0700661 dev = __dev_get_by_index(net, ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662 if (dev)
663 dev_hold(dev);
664 read_unlock(&dev_base_lock);
665 return dev;
666}
667
668/**
669 * dev_getbyhwaddr - find a device by its hardware address
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700670 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 * @type: media type of device
672 * @ha: hardware address
673 *
674 * Search for an interface by MAC address. Returns NULL if the device
675 * is not found or a pointer to the device. The caller must hold the
676 * rtnl semaphore. The returned device has not had its ref count increased
677 * and the caller must therefore be careful about locking
678 *
679 * BUGS:
680 * If the API was consistent this would be __dev_get_by_hwaddr
681 */
682
Eric W. Biederman881d9662007-09-17 11:56:21 -0700683struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684{
685 struct net_device *dev;
686
687 ASSERT_RTNL();
688
Denis V. Lunev81103a52007-12-12 10:47:38 -0800689 for_each_netdev(net, dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700690 if (dev->type == type &&
691 !memcmp(dev->dev_addr, ha, dev->addr_len))
Pavel Emelianov7562f872007-05-03 15:13:45 -0700692 return dev;
693
694 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695}
696
Jochen Friedrichcf309e32005-09-22 04:44:55 -0300697EXPORT_SYMBOL(dev_getbyhwaddr);
698
Eric W. Biederman881d9662007-09-17 11:56:21 -0700699struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700700{
701 struct net_device *dev;
702
703 ASSERT_RTNL();
Eric W. Biederman881d9662007-09-17 11:56:21 -0700704 for_each_netdev(net, dev)
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700705 if (dev->type == type)
Pavel Emelianov7562f872007-05-03 15:13:45 -0700706 return dev;
707
708 return NULL;
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700709}
710
711EXPORT_SYMBOL(__dev_getfirstbyhwtype);
712
Eric W. Biederman881d9662007-09-17 11:56:21 -0700713struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700714{
715 struct net_device *dev;
716
717 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -0700718 dev = __dev_getfirstbyhwtype(net, type);
Patrick McHardy4e9cac22007-05-03 03:28:13 -0700719 if (dev)
720 dev_hold(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700721 rtnl_unlock();
722 return dev;
723}
724
725EXPORT_SYMBOL(dev_getfirstbyhwtype);
726
727/**
728 * dev_get_by_flags - find any device with given flags
Randy Dunlapc4ea43c2007-10-12 21:17:49 -0700729 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -0700730 * @if_flags: IFF_* values
731 * @mask: bitmask of bits in if_flags to check
732 *
733 * Search for any interface with the given flags. Returns NULL if a device
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +0900734 * is not found or a pointer to the device. The device returned has
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735 * had a reference added and the pointer is safe until the user calls
736 * dev_put to indicate they have finished with it.
737 */
738
Eric W. Biederman881d9662007-09-17 11:56:21 -0700739struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700740{
Pavel Emelianov7562f872007-05-03 15:13:45 -0700741 struct net_device *dev, *ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742
Pavel Emelianov7562f872007-05-03 15:13:45 -0700743 ret = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700744 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -0700745 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700746 if (((dev->flags ^ if_flags) & mask) == 0) {
747 dev_hold(dev);
Pavel Emelianov7562f872007-05-03 15:13:45 -0700748 ret = dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749 break;
750 }
751 }
752 read_unlock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -0700753 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754}
755
756/**
757 * dev_valid_name - check if name is okay for network device
758 * @name: name string
759 *
760 * Network device names need to be valid file names to
David S. Millerc7fa9d12006-08-15 16:34:13 -0700761 * to allow sysfs to work. We also disallow any kind of
762 * whitespace.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700763 */
Mitch Williamsc2373ee2005-11-09 10:34:45 -0800764int dev_valid_name(const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765{
David S. Millerc7fa9d12006-08-15 16:34:13 -0700766 if (*name == '\0')
767 return 0;
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -0700768 if (strlen(name) >= IFNAMSIZ)
769 return 0;
David S. Millerc7fa9d12006-08-15 16:34:13 -0700770 if (!strcmp(name, ".") || !strcmp(name, ".."))
771 return 0;
772
773 while (*name) {
774 if (*name == '/' || isspace(*name))
775 return 0;
776 name++;
777 }
778 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779}
780
781/**
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200782 * __dev_alloc_name - allocate a name for a device
783 * @net: network namespace to allocate the device name in
Linus Torvalds1da177e2005-04-16 15:20:36 -0700784 * @name: name format string
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200785 * @buf: scratch buffer and result name string
Linus Torvalds1da177e2005-04-16 15:20:36 -0700786 *
787 * Passed a format string - eg "lt%d" it will try and find a suitable
Stephen Hemminger3041a062006-05-26 13:25:24 -0700788 * id. It scans list of devices to build up a free map, then chooses
789 * the first empty slot. The caller must hold the dev_base or rtnl lock
790 * while allocating the name and adding the device in order to avoid
791 * duplicates.
792 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
793 * Returns the number of the unit assigned or a negative errno code.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700794 */
795
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200796static int __dev_alloc_name(struct net *net, const char *name, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700797{
798 int i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700799 const char *p;
800 const int max_netdevices = 8*PAGE_SIZE;
Stephen Hemmingercfcabdc2007-10-09 01:59:42 -0700801 unsigned long *inuse;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700802 struct net_device *d;
803
804 p = strnchr(name, IFNAMSIZ-1, '%');
805 if (p) {
806 /*
807 * Verify the string as this thing may have come from
808 * the user. There must be either one "%d" and no other "%"
809 * characters.
810 */
811 if (p[1] != 'd' || strchr(p + 2, '%'))
812 return -EINVAL;
813
814 /* Use one page as a bit array of possible slots */
Stephen Hemmingercfcabdc2007-10-09 01:59:42 -0700815 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816 if (!inuse)
817 return -ENOMEM;
818
Eric W. Biederman881d9662007-09-17 11:56:21 -0700819 for_each_netdev(net, d) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820 if (!sscanf(d->name, name, &i))
821 continue;
822 if (i < 0 || i >= max_netdevices)
823 continue;
824
825 /* avoid cases where sscanf is not exact inverse of printf */
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200826 snprintf(buf, IFNAMSIZ, name, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700827 if (!strncmp(buf, d->name, IFNAMSIZ))
828 set_bit(i, inuse);
829 }
830
831 i = find_first_zero_bit(inuse, max_netdevices);
832 free_page((unsigned long) inuse);
833 }
834
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200835 snprintf(buf, IFNAMSIZ, name, i);
836 if (!__dev_get_by_name(net, buf))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700837 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838
839 /* It is possible to run out of possible slots
840 * when the name is long and there isn't enough space left
841 * for the digits, or if all bits are used.
842 */
843 return -ENFILE;
844}
845
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200846/**
847 * dev_alloc_name - allocate a name for a device
848 * @dev: device
849 * @name: name format string
850 *
851 * Passed a format string - eg "lt%d" it will try and find a suitable
852 * id. It scans list of devices to build up a free map, then chooses
853 * the first empty slot. The caller must hold the dev_base or rtnl lock
854 * while allocating the name and adding the device in order to avoid
855 * duplicates.
856 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
857 * Returns the number of the unit assigned or a negative errno code.
858 */
859
860int dev_alloc_name(struct net_device *dev, const char *name)
861{
862 char buf[IFNAMSIZ];
863 struct net *net;
864 int ret;
865
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900866 BUG_ON(!dev_net(dev));
867 net = dev_net(dev);
Eric W. Biedermanb267b172007-09-12 13:48:45 +0200868 ret = __dev_alloc_name(net, name, buf);
869 if (ret >= 0)
870 strlcpy(dev->name, buf, IFNAMSIZ);
871 return ret;
872}
873
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874
875/**
876 * dev_change_name - change name of a device
877 * @dev: device
878 * @newname: name (or format string) must be at least IFNAMSIZ
879 *
880 * Change name of a device, can pass format strings "eth%d".
881 * for wildcarding.
882 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -0700883int dev_change_name(struct net_device *dev, const char *newname)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700884{
Herbert Xufcc5a032007-07-30 17:03:38 -0700885 char oldname[IFNAMSIZ];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700886 int err = 0;
Herbert Xufcc5a032007-07-30 17:03:38 -0700887 int ret;
Eric W. Biederman881d9662007-09-17 11:56:21 -0700888 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889
890 ASSERT_RTNL();
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900891 BUG_ON(!dev_net(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900893 net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894 if (dev->flags & IFF_UP)
895 return -EBUSY;
896
897 if (!dev_valid_name(newname))
898 return -EINVAL;
899
Stephen Hemmingerc8d90dc2007-10-26 03:53:42 -0700900 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
901 return 0;
902
Herbert Xufcc5a032007-07-30 17:03:38 -0700903 memcpy(oldname, dev->name, IFNAMSIZ);
904
Linus Torvalds1da177e2005-04-16 15:20:36 -0700905 if (strchr(newname, '%')) {
906 err = dev_alloc_name(dev, newname);
907 if (err < 0)
908 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909 }
Eric W. Biederman881d9662007-09-17 11:56:21 -0700910 else if (__dev_get_by_name(net, newname))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911 return -EEXIST;
912 else
913 strlcpy(dev->name, newname, IFNAMSIZ);
914
Herbert Xufcc5a032007-07-30 17:03:38 -0700915rollback:
Eric W. Biederman38918452008-10-27 17:51:47 -0700916 /* For now only devices in the initial network namespace
917 * are in sysfs.
918 */
919 if (net == &init_net) {
920 ret = device_rename(&dev->dev, dev->name);
921 if (ret) {
922 memcpy(dev->name, oldname, IFNAMSIZ);
923 return ret;
924 }
Stephen Hemmingerdcc99772008-05-14 22:33:38 -0700925 }
Herbert Xu7f988ea2007-07-30 16:35:46 -0700926
927 write_lock_bh(&dev_base_lock);
Eric W. Biederman92749822007-04-03 00:07:30 -0600928 hlist_del(&dev->name_hlist);
Eric W. Biederman881d9662007-09-17 11:56:21 -0700929 hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
Herbert Xu7f988ea2007-07-30 16:35:46 -0700930 write_unlock_bh(&dev_base_lock);
931
Pavel Emelyanov056925a2007-09-16 15:42:43 -0700932 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -0700933 ret = notifier_to_errno(ret);
934
935 if (ret) {
936 if (err) {
937 printk(KERN_ERR
938 "%s: name change rollback failed: %d.\n",
939 dev->name, ret);
940 } else {
941 err = ret;
942 memcpy(dev->name, oldname, IFNAMSIZ);
943 goto rollback;
944 }
945 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946
947 return err;
948}
949
950/**
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700951 * dev_set_alias - change ifalias of a device
952 * @dev: device
953 * @alias: name up to IFALIASZ
Stephen Hemmingerf0db2752008-09-30 02:23:58 -0700954 * @len: limit of bytes to copy from info
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700955 *
956 * Set ifalias for a device,
957 */
958int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
959{
960 ASSERT_RTNL();
961
962 if (len >= IFALIASZ)
963 return -EINVAL;
964
Oliver Hartkopp96ca4a22008-09-23 21:23:19 -0700965 if (!len) {
966 if (dev->ifalias) {
967 kfree(dev->ifalias);
968 dev->ifalias = NULL;
969 }
970 return 0;
971 }
972
Stephen Hemminger0b815a12008-09-22 21:28:11 -0700973 dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
974 if (!dev->ifalias)
975 return -ENOMEM;
976
977 strlcpy(dev->ifalias, alias, len+1);
978 return len;
979}
980
981
982/**
Stephen Hemminger3041a062006-05-26 13:25:24 -0700983 * netdev_features_change - device changes features
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -0700984 * @dev: device to cause notification
985 *
986 * Called to indicate a device has changed features.
987 */
988void netdev_features_change(struct net_device *dev)
989{
Pavel Emelyanov056925a2007-09-16 15:42:43 -0700990 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
Stephen Hemmingerd8a33ac2005-05-29 14:13:47 -0700991}
992EXPORT_SYMBOL(netdev_features_change);
993
994/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995 * netdev_state_change - device changes state
996 * @dev: device to cause notification
997 *
998 * Called to indicate a device has changed state. This function calls
999 * the notifier chains for netdev_chain and sends a NEWLINK message
1000 * to the routing socket.
1001 */
1002void netdev_state_change(struct net_device *dev)
1003{
1004 if (dev->flags & IFF_UP) {
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001005 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1007 }
1008}
1009
Or Gerlitzc1da4ac2008-06-13 18:12:00 -07001010void netdev_bonding_change(struct net_device *dev)
1011{
1012 call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
1013}
1014EXPORT_SYMBOL(netdev_bonding_change);
1015
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016/**
1017 * dev_load - load a network module
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001018 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07001019 * @name: name of interface
1020 *
1021 * If a network interface is not present and the process has suitable
1022 * privileges this function loads the module. If module loading is not
1023 * available in this kernel then it becomes a nop.
1024 */
1025
Eric W. Biederman881d9662007-09-17 11:56:21 -07001026void dev_load(struct net *net, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027{
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001028 struct net_device *dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029
1030 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07001031 dev = __dev_get_by_name(net, name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032 read_unlock(&dev_base_lock);
1033
1034 if (!dev && capable(CAP_SYS_MODULE))
1035 request_module("%s", name);
1036}
1037
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038/**
1039 * dev_open - prepare an interface for use.
1040 * @dev: device to open
1041 *
1042 * Takes a device from down to up state. The device's private open
1043 * function is invoked and then the multicast lists are loaded. Finally
1044 * the device is moved into the up state and a %NETDEV_UP message is
1045 * sent to the netdev notifier chain.
1046 *
1047 * Calling this function on an active interface is a nop. On a failure
1048 * a negative errno code is returned.
1049 */
1050int dev_open(struct net_device *dev)
1051{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001052 const struct net_device_ops *ops = dev->netdev_ops;
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001053 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001055 ASSERT_RTNL();
1056
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057 /*
1058 * Is it already up?
1059 */
1060
1061 if (dev->flags & IFF_UP)
1062 return 0;
1063
1064 /*
1065 * Is it even present?
1066 */
1067 if (!netif_device_present(dev))
1068 return -ENODEV;
1069
Johannes Berg3b8bcfd2009-05-30 01:39:53 +02001070 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1071 ret = notifier_to_errno(ret);
1072 if (ret)
1073 return ret;
1074
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 /*
1076 * Call device private open method
1077 */
1078 set_bit(__LINK_STATE_START, &dev->state);
Jeff Garzikbada3392007-10-23 20:19:37 -07001079
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001080 if (ops->ndo_validate_addr)
1081 ret = ops->ndo_validate_addr(dev);
Jeff Garzikbada3392007-10-23 20:19:37 -07001082
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001083 if (!ret && ops->ndo_open)
1084 ret = ops->ndo_open(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001086 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087 * If it went open OK then:
1088 */
1089
Jeff Garzikbada3392007-10-23 20:19:37 -07001090 if (ret)
1091 clear_bit(__LINK_STATE_START, &dev->state);
1092 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 /*
1094 * Set the flags.
1095 */
1096 dev->flags |= IFF_UP;
1097
1098 /*
Dan Williams649274d2009-01-11 00:20:39 -08001099 * Enable NET_DMA
1100 */
David S. Millerb4bd07c2009-02-06 22:06:43 -08001101 net_dmaengine_get();
Dan Williams649274d2009-01-11 00:20:39 -08001102
1103 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001104 * Initialize multicasting status
1105 */
Patrick McHardy4417da62007-06-27 01:28:10 -07001106 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107
1108 /*
1109 * Wakeup transmit queue engine
1110 */
1111 dev_activate(dev);
1112
1113 /*
1114 * ... and announce new interface.
1115 */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001116 call_netdevice_notifiers(NETDEV_UP, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001117 }
Jeff Garzikbada3392007-10-23 20:19:37 -07001118
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119 return ret;
1120}
1121
1122/**
1123 * dev_close - shutdown an interface.
1124 * @dev: device to shutdown
1125 *
1126 * This function moves an active device into down state. A
1127 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1128 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1129 * chain.
1130 */
1131int dev_close(struct net_device *dev)
1132{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001133 const struct net_device_ops *ops = dev->netdev_ops;
Ben Hutchingse46b66b2008-05-08 02:53:17 -07001134 ASSERT_RTNL();
1135
David S. Miller9d5010d2007-09-12 14:33:25 +02001136 might_sleep();
1137
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138 if (!(dev->flags & IFF_UP))
1139 return 0;
1140
1141 /*
1142 * Tell people we are going down, so that they can
1143 * prepare to death, when device is still operating.
1144 */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001145 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147 clear_bit(__LINK_STATE_START, &dev->state);
1148
1149 /* Synchronize to scheduled poll. We cannot touch poll list,
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001150 * it can be even on different cpu. So just clear netif_running().
1151 *
1152 * dev->stop() will invoke napi_disable() on all of it's
1153 * napi_struct instances on this device.
1154 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155 smp_mb__after_clear_bit(); /* Commit netif_running(). */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156
Matti Linnanvuorid8b2a4d2008-02-12 23:10:11 -08001157 dev_deactivate(dev);
1158
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 /*
1160 * Call the device specific close. This cannot fail.
1161 * Only if device is UP
1162 *
1163 * We allow it to be called even after a DETACH hot-plug
1164 * event.
1165 */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08001166 if (ops->ndo_stop)
1167 ops->ndo_stop(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168
1169 /*
1170 * Device is now down.
1171 */
1172
1173 dev->flags &= ~IFF_UP;
1174
1175 /*
1176 * Tell people we are down
1177 */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07001178 call_netdevice_notifiers(NETDEV_DOWN, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179
Dan Williams649274d2009-01-11 00:20:39 -08001180 /*
1181 * Shutdown NET_DMA
1182 */
David S. Millerb4bd07c2009-02-06 22:06:43 -08001183 net_dmaengine_put();
Dan Williams649274d2009-01-11 00:20:39 -08001184
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 return 0;
1186}
1187
1188
Ben Hutchings0187bdf2008-06-19 16:15:47 -07001189/**
1190 * dev_disable_lro - disable Large Receive Offload on a device
1191 * @dev: device
1192 *
1193 * Disable Large Receive Offload (LRO) on a net device. Must be
1194 * called under RTNL. This is needed if received packets may be
1195 * forwarded to another interface.
1196 */
1197void dev_disable_lro(struct net_device *dev)
1198{
1199 if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
1200 dev->ethtool_ops->set_flags) {
1201 u32 flags = dev->ethtool_ops->get_flags(dev);
1202 if (flags & ETH_FLAG_LRO) {
1203 flags &= ~ETH_FLAG_LRO;
1204 dev->ethtool_ops->set_flags(dev, flags);
1205 }
1206 }
1207 WARN_ON(dev->features & NETIF_F_LRO);
1208}
1209EXPORT_SYMBOL(dev_disable_lro);
1210
1211
Eric W. Biederman881d9662007-09-17 11:56:21 -07001212static int dev_boot_phase = 1;
1213
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214/*
1215 * Device change register/unregister. These are not inline or static
1216 * as we export them to the world.
1217 */
1218
1219/**
1220 * register_netdevice_notifier - register a network notifier block
1221 * @nb: notifier
1222 *
1223 * Register a notifier to be called when network device events occur.
1224 * The notifier passed is linked into the kernel structures and must
1225 * not be reused until it has been unregistered. A negative errno code
1226 * is returned on a failure.
1227 *
1228 * When registered all registration and up events are replayed
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001229 * to the new notifier to allow device to have a race free
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 * view of the network device list.
1231 */
1232
1233int register_netdevice_notifier(struct notifier_block *nb)
1234{
1235 struct net_device *dev;
Herbert Xufcc5a032007-07-30 17:03:38 -07001236 struct net_device *last;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001237 struct net *net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238 int err;
1239
1240 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001241 err = raw_notifier_chain_register(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001242 if (err)
1243 goto unlock;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001244 if (dev_boot_phase)
1245 goto unlock;
1246 for_each_net(net) {
1247 for_each_netdev(net, dev) {
1248 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1249 err = notifier_to_errno(err);
1250 if (err)
1251 goto rollback;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252
Eric W. Biederman881d9662007-09-17 11:56:21 -07001253 if (!(dev->flags & IFF_UP))
1254 continue;
Herbert Xufcc5a032007-07-30 17:03:38 -07001255
Eric W. Biederman881d9662007-09-17 11:56:21 -07001256 nb->notifier_call(nb, NETDEV_UP, dev);
1257 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001259
1260unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 rtnl_unlock();
1262 return err;
Herbert Xufcc5a032007-07-30 17:03:38 -07001263
1264rollback:
1265 last = dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07001266 for_each_net(net) {
1267 for_each_netdev(net, dev) {
1268 if (dev == last)
1269 break;
Herbert Xufcc5a032007-07-30 17:03:38 -07001270
Eric W. Biederman881d9662007-09-17 11:56:21 -07001271 if (dev->flags & IFF_UP) {
1272 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1273 nb->notifier_call(nb, NETDEV_DOWN, dev);
1274 }
1275 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07001276 }
Herbert Xufcc5a032007-07-30 17:03:38 -07001277 }
Pavel Emelyanovc67625a2007-11-14 15:53:16 -08001278
1279 raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xufcc5a032007-07-30 17:03:38 -07001280 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281}
1282
1283/**
1284 * unregister_netdevice_notifier - unregister a network notifier block
1285 * @nb: notifier
1286 *
1287 * Unregister a notifier previously registered by
1288 * register_netdevice_notifier(). The notifier is unlinked into the
1289 * kernel structures and may then be reused. A negative errno code
1290 * is returned on a failure.
1291 */
1292
1293int unregister_netdevice_notifier(struct notifier_block *nb)
1294{
Herbert Xu9f514952006-03-25 01:24:25 -08001295 int err;
1296
1297 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001298 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001299 rtnl_unlock();
1300 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301}
1302
1303/**
1304 * call_netdevice_notifiers - call all network notifier blocks
1305 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001306 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 *
1308 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001309 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 */
1311
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001312int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001314 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315}
1316
1317/* When > 0 there are consumers of rx skb time stamps */
1318static atomic_t netstamp_needed = ATOMIC_INIT(0);
1319
1320void net_enable_timestamp(void)
1321{
1322 atomic_inc(&netstamp_needed);
1323}
1324
1325void net_disable_timestamp(void)
1326{
1327 atomic_dec(&netstamp_needed);
1328}
1329
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001330static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331{
1332 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001333 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001334 else
1335 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336}
1337
1338/*
1339 * Support routine. Sends outgoing frames to any network
1340 * taps currently in use.
1341 */
1342
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001343static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344{
1345 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001346
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001347#ifdef CONFIG_NET_CLS_ACT
1348 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1349 net_timestamp(skb);
1350#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001351 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001352#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353
1354 rcu_read_lock();
1355 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1356 /* Never send packets back to the socket
1357 * they originated from - MvS (miquels@drinkel.ow.org)
1358 */
1359 if ((ptype->dev == dev || !ptype->dev) &&
1360 (ptype->af_packet_priv == NULL ||
1361 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1362 struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
1363 if (!skb2)
1364 break;
1365
1366 /* skb->nh should be correctly
1367 set by sender, so that the second statement is
1368 just protection against buggy protocols.
1369 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001370 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001372 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001373 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 if (net_ratelimit())
1375 printk(KERN_CRIT "protocol %04x is "
1376 "buggy, dev %s\n",
1377 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001378 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 }
1380
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001381 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001383 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 }
1385 }
1386 rcu_read_unlock();
1387}
1388
Denis Vlasenko56079432006-03-29 15:57:29 -08001389
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001390static inline void __netif_reschedule(struct Qdisc *q)
1391{
1392 struct softnet_data *sd;
1393 unsigned long flags;
1394
1395 local_irq_save(flags);
1396 sd = &__get_cpu_var(softnet_data);
1397 q->next_sched = sd->output_queue;
1398 sd->output_queue = q;
1399 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1400 local_irq_restore(flags);
1401}
1402
David S. Miller37437bb2008-07-16 02:15:04 -07001403void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001404{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001405 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1406 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001407}
1408EXPORT_SYMBOL(__netif_schedule);
1409
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001410void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001411{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001412 if (atomic_dec_and_test(&skb->users)) {
1413 struct softnet_data *sd;
1414 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001415
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001416 local_irq_save(flags);
1417 sd = &__get_cpu_var(softnet_data);
1418 skb->next = sd->completion_queue;
1419 sd->completion_queue = skb;
1420 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1421 local_irq_restore(flags);
1422 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001423}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001424EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001425
1426void dev_kfree_skb_any(struct sk_buff *skb)
1427{
1428 if (in_irq() || irqs_disabled())
1429 dev_kfree_skb_irq(skb);
1430 else
1431 dev_kfree_skb(skb);
1432}
1433EXPORT_SYMBOL(dev_kfree_skb_any);
1434
1435
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001436/**
1437 * netif_device_detach - mark device as removed
1438 * @dev: network device
1439 *
1440 * Mark device as removed from system and therefore no longer available.
1441 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001442void netif_device_detach(struct net_device *dev)
1443{
1444 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1445 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001446 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001447 }
1448}
1449EXPORT_SYMBOL(netif_device_detach);
1450
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001451/**
1452 * netif_device_attach - mark device as attached
1453 * @dev: network device
1454 *
1455 * Mark device as attached from system and restart if needed.
1456 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001457void netif_device_attach(struct net_device *dev)
1458{
1459 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1460 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001461 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001462 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001463 }
1464}
1465EXPORT_SYMBOL(netif_device_attach);
1466
Ben Hutchings6de329e2008-06-16 17:02:28 -07001467static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1468{
1469 return ((features & NETIF_F_GEN_CSUM) ||
1470 ((features & NETIF_F_IP_CSUM) &&
1471 protocol == htons(ETH_P_IP)) ||
1472 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001473 protocol == htons(ETH_P_IPV6)) ||
1474 ((features & NETIF_F_FCOE_CRC) &&
1475 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001476}
1477
1478static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1479{
1480 if (can_checksum_protocol(dev->features, skb->protocol))
1481 return true;
1482
1483 if (skb->protocol == htons(ETH_P_8021Q)) {
1484 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1485 if (can_checksum_protocol(dev->features & dev->vlan_features,
1486 veh->h_vlan_encapsulated_proto))
1487 return true;
1488 }
1489
1490 return false;
1491}
Denis Vlasenko56079432006-03-29 15:57:29 -08001492
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493/*
1494 * Invalidate hardware checksum when packet is to be mangled, and
1495 * complete checksum manually on outgoing path.
1496 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001497int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498{
Al Virod3bc23e2006-11-14 21:24:49 -08001499 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001500 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501
Patrick McHardy84fa7932006-08-29 16:44:56 -07001502 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001503 goto out_set_summed;
1504
1505 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001506 /* Let GSO fix up the checksum. */
1507 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 }
1509
Herbert Xua0308472007-10-15 01:47:15 -07001510 offset = skb->csum_start - skb_headroom(skb);
1511 BUG_ON(offset >= skb_headlen(skb));
1512 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1513
1514 offset += skb->csum_offset;
1515 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1516
1517 if (skb_cloned(skb) &&
1518 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1520 if (ret)
1521 goto out;
1522 }
1523
Herbert Xua0308472007-10-15 01:47:15 -07001524 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001525out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001527out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 return ret;
1529}
1530
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001531/**
1532 * skb_gso_segment - Perform segmentation on skb.
1533 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001534 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001535 *
1536 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001537 *
1538 * It may return NULL if the skb requires no segmentation. This is
1539 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001540 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001541struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001542{
1543 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1544 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001545 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001546 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001547
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001548 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001549 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001550 __skb_pull(skb, skb->mac_len);
1551
Herbert Xu67fd1a72009-01-19 16:26:44 -08001552 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1553 struct net_device *dev = skb->dev;
1554 struct ethtool_drvinfo info = {};
1555
1556 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1557 dev->ethtool_ops->get_drvinfo(dev, &info);
1558
1559 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1560 "ip_summed=%d",
1561 info.driver, dev ? dev->features : 0L,
1562 skb->sk ? skb->sk->sk_route_caps : 0L,
1563 skb->len, skb->data_len, skb->ip_summed);
1564
Herbert Xua430a432006-07-08 13:34:56 -07001565 if (skb_header_cloned(skb) &&
1566 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1567 return ERR_PTR(err);
1568 }
1569
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001570 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001571 list_for_each_entry_rcu(ptype,
1572 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001573 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001574 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001575 err = ptype->gso_send_check(skb);
1576 segs = ERR_PTR(err);
1577 if (err || skb_gso_ok(skb, features))
1578 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001579 __skb_push(skb, (skb->data -
1580 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001581 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001582 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001583 break;
1584 }
1585 }
1586 rcu_read_unlock();
1587
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001588 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001589
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001590 return segs;
1591}
1592
1593EXPORT_SYMBOL(skb_gso_segment);
1594
Herbert Xufb286bb2005-11-10 13:01:24 -08001595/* Take action when hardware reception checksum errors are detected. */
1596#ifdef CONFIG_BUG
1597void netdev_rx_csum_fault(struct net_device *dev)
1598{
1599 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001600 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001601 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001602 dump_stack();
1603 }
1604}
1605EXPORT_SYMBOL(netdev_rx_csum_fault);
1606#endif
1607
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608/* Actually, we should eliminate this check as soon as we know, that:
1609 * 1. IOMMU is present and allows to map all the memory.
1610 * 2. No high memory really exists on this machine.
1611 */
1612
1613static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1614{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001615#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 int i;
1617
1618 if (dev->features & NETIF_F_HIGHDMA)
1619 return 0;
1620
1621 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1622 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1623 return 1;
1624
Herbert Xu3d3a8532006-06-27 13:33:10 -07001625#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 return 0;
1627}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001629struct dev_gso_cb {
1630 void (*destructor)(struct sk_buff *skb);
1631};
1632
1633#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1634
1635static void dev_gso_skb_destructor(struct sk_buff *skb)
1636{
1637 struct dev_gso_cb *cb;
1638
1639 do {
1640 struct sk_buff *nskb = skb->next;
1641
1642 skb->next = nskb->next;
1643 nskb->next = NULL;
1644 kfree_skb(nskb);
1645 } while (skb->next);
1646
1647 cb = DEV_GSO_CB(skb);
1648 if (cb->destructor)
1649 cb->destructor(skb);
1650}
1651
1652/**
1653 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1654 * @skb: buffer to segment
1655 *
1656 * This function segments the given skb and stores the list of segments
1657 * in skb->next.
1658 */
1659static int dev_gso_segment(struct sk_buff *skb)
1660{
1661 struct net_device *dev = skb->dev;
1662 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001663 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1664 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001665
Herbert Xu576a30e2006-06-27 13:22:38 -07001666 segs = skb_gso_segment(skb, features);
1667
1668 /* Verifying header integrity only. */
1669 if (!segs)
1670 return 0;
1671
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001672 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001673 return PTR_ERR(segs);
1674
1675 skb->next = segs;
1676 DEV_GSO_CB(skb)->destructor = skb->destructor;
1677 skb->destructor = dev_gso_skb_destructor;
1678
1679 return 0;
1680}
1681
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001682int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1683 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001684{
Stephen Hemminger00829822008-11-20 20:14:53 -08001685 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001686 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001687
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001688 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001689 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001690 dev_queue_xmit_nit(skb, dev);
1691
Herbert Xu576a30e2006-06-27 13:22:38 -07001692 if (netif_needs_gso(dev, skb)) {
1693 if (unlikely(dev_gso_segment(skb)))
1694 goto out_kfree_skb;
1695 if (skb->next)
1696 goto gso;
1697 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001698
Eric Dumazet93f154b2009-05-18 22:19:19 -07001699 /*
1700 * If device doesnt need skb->dst, release it right now while
1701 * its hot in this cpu cache
1702 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001703 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1704 skb_dst_drop(skb);
1705
Patrick Ohlyac45f602009-02-12 05:03:37 +00001706 rc = ops->ndo_start_xmit(skb, dev);
Eric Dumazet08baf562009-05-25 22:58:01 -07001707 if (rc == 0)
1708 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001709 /*
1710 * TODO: if skb_orphan() was called by
1711 * dev->hard_start_xmit() (for example, the unmodified
1712 * igb driver does that; bnx2 doesn't), then
1713 * skb_tx_software_timestamp() will be unable to send
1714 * back the time stamp.
1715 *
1716 * How can this be prevented? Always create another
1717 * reference to the socket before calling
1718 * dev->hard_start_xmit()? Prevent that skb_orphan()
1719 * does anything in dev->hard_start_xmit() by clearing
1720 * the skb destructor before the call and restoring it
1721 * afterwards, then doing the skb_orphan() ourselves?
1722 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001723 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001724 }
1725
Herbert Xu576a30e2006-06-27 13:22:38 -07001726gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001727 do {
1728 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001729
1730 skb->next = nskb->next;
1731 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001732 rc = ops->ndo_start_xmit(nskb, dev);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001733 if (unlikely(rc)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001734 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001735 skb->next = nskb;
1736 return rc;
1737 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001738 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001739 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001740 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001741 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001742
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001743 skb->destructor = DEV_GSO_CB(skb)->destructor;
1744
1745out_kfree_skb:
1746 kfree_skb(skb);
1747 return 0;
1748}
1749
David S. Miller70192982009-01-27 16:34:47 -08001750static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001751
Stephen Hemminger92477442009-03-21 13:39:26 -07001752u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001753{
David S. Miller70192982009-01-27 16:34:47 -08001754 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001755
David S. Miller513de112009-05-03 14:43:10 -07001756 if (skb_rx_queue_recorded(skb)) {
1757 hash = skb_get_rx_queue(skb);
1758 while (unlikely (hash >= dev->real_num_tx_queues))
1759 hash -= dev->real_num_tx_queues;
1760 return hash;
1761 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001762
1763 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001764 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001765 else
David S. Miller70192982009-01-27 16:34:47 -08001766 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001767
David S. Miller70192982009-01-27 16:34:47 -08001768 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001769
David S. Millerb6b2fed2008-07-21 09:48:06 -07001770 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001771}
Stephen Hemminger92477442009-03-21 13:39:26 -07001772EXPORT_SYMBOL(skb_tx_hash);
David S. Miller8f0f2222008-07-15 03:47:03 -07001773
David S. Millere8a04642008-07-17 00:34:19 -07001774static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1775 struct sk_buff *skb)
1776{
Stephen Hemminger00829822008-11-20 20:14:53 -08001777 const struct net_device_ops *ops = dev->netdev_ops;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001778 u16 queue_index = 0;
1779
Stephen Hemminger00829822008-11-20 20:14:53 -08001780 if (ops->ndo_select_queue)
1781 queue_index = ops->ndo_select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001782 else if (dev->real_num_tx_queues > 1)
David S. Miller70192982009-01-27 16:34:47 -08001783 queue_index = skb_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001784
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001785 skb_set_queue_mapping(skb, queue_index);
1786 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001787}
1788
Dave Jonesd29f7492008-07-22 14:09:06 -07001789/**
1790 * dev_queue_xmit - transmit a buffer
1791 * @skb: buffer to transmit
1792 *
1793 * Queue a buffer for transmission to a network device. The caller must
1794 * have set the device and priority and built the buffer before calling
1795 * this function. The function can be called from an interrupt.
1796 *
1797 * A negative errno code is returned on a failure. A success does not
1798 * guarantee the frame will be transmitted as it may be dropped due
1799 * to congestion or traffic shaping.
1800 *
1801 * -----------------------------------------------------------------------------------
1802 * I notice this method can also return errors from the queue disciplines,
1803 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1804 * be positive.
1805 *
1806 * Regardless of the return value, the skb is consumed, so it is currently
1807 * difficult to retry a send to this method. (You can bump the ref count
1808 * before sending to hold a reference for retry if you are careful.)
1809 *
1810 * When calling this method, interrupts MUST be enabled. This is because
1811 * the BH enable code must have IRQs enabled so that it will not deadlock.
1812 * --BLG
1813 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814int dev_queue_xmit(struct sk_buff *skb)
1815{
1816 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001817 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 struct Qdisc *q;
1819 int rc = -ENOMEM;
1820
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001821 /* GSO will handle the following emulations directly. */
1822 if (netif_needs_gso(dev, skb))
1823 goto gso;
1824
David S. Miller4cf704f2009-06-09 00:18:51 -07001825 if (skb_has_frags(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001827 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 goto out_kfree_skb;
1829
1830 /* Fragmented skb is linearized if device does not support SG,
1831 * or if at least one of fragments is in highmem and device
1832 * does not support DMA from it.
1833 */
1834 if (skb_shinfo(skb)->nr_frags &&
1835 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001836 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 goto out_kfree_skb;
1838
1839 /* If packet is not checksummed and device does not support
1840 * checksumming for this protocol, complete checksumming here.
1841 */
Herbert Xu663ead32007-04-09 11:59:07 -07001842 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1843 skb_set_transport_header(skb, skb->csum_start -
1844 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001845 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1846 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001847 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001849gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001850 /* Disable soft irqs for various locks below. Also
1851 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001853 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854
David S. Millereae792b2008-07-15 03:03:33 -07001855 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001856 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001857
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858#ifdef CONFIG_NET_CLS_ACT
1859 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1860#endif
1861 if (q->enqueue) {
David S. Miller5fb66222008-08-02 20:02:43 -07001862 spinlock_t *root_lock = qdisc_lock(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
David S. Miller37437bb2008-07-16 02:15:04 -07001864 spin_lock(root_lock);
1865
David S. Millera9312ae2008-08-17 21:51:03 -07001866 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
David S. Miller96d20312008-08-17 23:37:16 -07001867 kfree_skb(skb);
David S. Millera9312ae2008-08-17 21:51:03 -07001868 rc = NET_XMIT_DROP;
David S. Miller96d20312008-08-17 23:37:16 -07001869 } else {
1870 rc = qdisc_enqueue_root(skb, q);
1871 qdisc_run(q);
David S. Millera9312ae2008-08-17 21:51:03 -07001872 }
David S. Miller37437bb2008-07-16 02:15:04 -07001873 spin_unlock(root_lock);
1874
David S. Miller37437bb2008-07-16 02:15:04 -07001875 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 }
1877
1878 /* The device has no queue. Common case for software devices:
1879 loopback, all the sorts of tunnels...
1880
Herbert Xu932ff272006-06-09 12:20:56 -07001881 Really, it is unlikely that netif_tx_lock protection is necessary
1882 here. (f.e. loopback and IP tunnels are clean ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 counters.)
1884 However, it is possible, that they rely on protection
1885 made by us here.
1886
1887 Check this and shot the lock. It is not prone from deadlocks.
1888 Either shot noqueue qdisc, it is even simpler 8)
1889 */
1890 if (dev->flags & IFF_UP) {
1891 int cpu = smp_processor_id(); /* ok because BHs are off */
1892
David S. Millerc773e842008-07-08 23:13:53 -07001893 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894
David S. Millerc773e842008-07-08 23:13:53 -07001895 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001897 if (!netif_tx_queue_stopped(txq)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 rc = 0;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001899 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001900 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 goto out;
1902 }
1903 }
David S. Millerc773e842008-07-08 23:13:53 -07001904 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 if (net_ratelimit())
1906 printk(KERN_CRIT "Virtual device %s asks to "
1907 "queue packet!\n", dev->name);
1908 } else {
1909 /* Recursion is detected! It is possible,
1910 * unfortunately */
1911 if (net_ratelimit())
1912 printk(KERN_CRIT "Dead loop on virtual device "
1913 "%s, fix it urgently!\n", dev->name);
1914 }
1915 }
1916
1917 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001918 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919
1920out_kfree_skb:
1921 kfree_skb(skb);
1922 return rc;
1923out:
Herbert Xud4828d82006-06-22 02:28:18 -07001924 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 return rc;
1926}
1927
1928
1929/*=======================================================================
1930 Receiver routines
1931 =======================================================================*/
1932
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001933int netdev_max_backlog __read_mostly = 1000;
1934int netdev_budget __read_mostly = 300;
1935int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
1937DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1938
1939
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940/**
1941 * netif_rx - post buffer to the network code
1942 * @skb: buffer to post
1943 *
1944 * This function receives a packet from a device driver and queues it for
1945 * the upper (protocol) levels to process. It always succeeds. The buffer
1946 * may be dropped during processing for congestion control or by the
1947 * protocol layers.
1948 *
1949 * return values:
1950 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 * NET_RX_DROP (packet was dropped)
1952 *
1953 */
1954
1955int netif_rx(struct sk_buff *skb)
1956{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 struct softnet_data *queue;
1958 unsigned long flags;
1959
1960 /* if netpoll wants it, pretend we never saw it */
1961 if (netpoll_rx(skb))
1962 return NET_RX_DROP;
1963
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001964 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001965 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966
1967 /*
1968 * The code is rearranged so that the path is the most
1969 * short when CPU is congested, but is still operating.
1970 */
1971 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 queue = &__get_cpu_var(softnet_data);
1973
1974 __get_cpu_var(netdev_rx_stat).total++;
1975 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1976 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07001980 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 }
1982
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001983 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 goto enqueue;
1985 }
1986
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 __get_cpu_var(netdev_rx_stat).dropped++;
1988 local_irq_restore(flags);
1989
1990 kfree_skb(skb);
1991 return NET_RX_DROP;
1992}
1993
1994int netif_rx_ni(struct sk_buff *skb)
1995{
1996 int err;
1997
1998 preempt_disable();
1999 err = netif_rx(skb);
2000 if (local_softirq_pending())
2001 do_softirq();
2002 preempt_enable();
2003
2004 return err;
2005}
2006
2007EXPORT_SYMBOL(netif_rx_ni);
2008
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009static void net_tx_action(struct softirq_action *h)
2010{
2011 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2012
2013 if (sd->completion_queue) {
2014 struct sk_buff *clist;
2015
2016 local_irq_disable();
2017 clist = sd->completion_queue;
2018 sd->completion_queue = NULL;
2019 local_irq_enable();
2020
2021 while (clist) {
2022 struct sk_buff *skb = clist;
2023 clist = clist->next;
2024
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002025 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 __kfree_skb(skb);
2027 }
2028 }
2029
2030 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002031 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032
2033 local_irq_disable();
2034 head = sd->output_queue;
2035 sd->output_queue = NULL;
2036 local_irq_enable();
2037
2038 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002039 struct Qdisc *q = head;
2040 spinlock_t *root_lock;
2041
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 head = head->next_sched;
2043
David S. Miller5fb66222008-08-02 20:02:43 -07002044 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002045 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002046 smp_mb__before_clear_bit();
2047 clear_bit(__QDISC_STATE_SCHED,
2048 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002049 qdisc_run(q);
2050 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002052 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002053 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002054 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002055 } else {
2056 smp_mb__before_clear_bit();
2057 clear_bit(__QDISC_STATE_SCHED,
2058 &q->state);
2059 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 }
2061 }
2062 }
2063}
2064
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002065static inline int deliver_skb(struct sk_buff *skb,
2066 struct packet_type *pt_prev,
2067 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068{
2069 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002070 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071}
2072
2073#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002074
2075#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2076/* This hook is defined here for ATM LANE */
2077int (*br_fdb_test_addr_hook)(struct net_device *dev,
2078 unsigned char *addr) __read_mostly;
2079EXPORT_SYMBOL(br_fdb_test_addr_hook);
2080#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081
Stephen Hemminger6229e362007-03-21 13:38:47 -07002082/*
2083 * If bridge module is loaded call bridging hook.
2084 * returns NULL if packet was consumed.
2085 */
2086struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2087 struct sk_buff *skb) __read_mostly;
Michał Mirosławda678292009-06-05 05:35:28 +00002088EXPORT_SYMBOL(br_handle_frame_hook);
2089
Stephen Hemminger6229e362007-03-21 13:38:47 -07002090static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2091 struct packet_type **pt_prev, int *ret,
2092 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093{
2094 struct net_bridge_port *port;
2095
Stephen Hemminger6229e362007-03-21 13:38:47 -07002096 if (skb->pkt_type == PACKET_LOOPBACK ||
2097 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2098 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099
2100 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002101 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002103 }
2104
Stephen Hemminger6229e362007-03-21 13:38:47 -07002105 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106}
2107#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002108#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109#endif
2110
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002111#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2112struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2113EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2114
2115static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2116 struct packet_type **pt_prev,
2117 int *ret,
2118 struct net_device *orig_dev)
2119{
2120 if (skb->dev->macvlan_port == NULL)
2121 return skb;
2122
2123 if (*pt_prev) {
2124 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2125 *pt_prev = NULL;
2126 }
2127 return macvlan_handle_frame_hook(skb);
2128}
2129#else
2130#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2131#endif
2132
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133#ifdef CONFIG_NET_CLS_ACT
2134/* TODO: Maybe we should just force sch_ingress to be compiled in
2135 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
2136 * a compare and 2 stores extra right now if we dont have it on
2137 * but have CONFIG_NET_CLS_ACT
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002138 * NOTE: This doesnt stop any functionality; if you dont have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 * the ingress scheduler, you just cant add policies on ingress.
2140 *
2141 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002142static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002145 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002146 struct netdev_queue *rxq;
2147 int result = TC_ACT_OK;
2148 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002149
Herbert Xuf697c3e2007-10-14 00:38:47 -07002150 if (MAX_RED_LOOP < ttl++) {
2151 printk(KERN_WARNING
2152 "Redir loop detected Dropping packet (%d->%d)\n",
2153 skb->iif, dev->ifindex);
2154 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 }
2156
Herbert Xuf697c3e2007-10-14 00:38:47 -07002157 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2158 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2159
David S. Miller555353c2008-07-08 17:33:13 -07002160 rxq = &dev->rx_queue;
2161
David S. Miller83874002008-07-17 00:53:03 -07002162 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002163 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002164 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002165 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2166 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002167 spin_unlock(qdisc_lock(q));
2168 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002169
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 return result;
2171}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002172
2173static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2174 struct packet_type **pt_prev,
2175 int *ret, struct net_device *orig_dev)
2176{
David S. Miller8d50b532008-07-30 02:37:46 -07002177 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002178 goto out;
2179
2180 if (*pt_prev) {
2181 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2182 *pt_prev = NULL;
2183 } else {
2184 /* Huh? Why does turning on AF_PACKET affect this? */
2185 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2186 }
2187
2188 switch (ing_filter(skb)) {
2189 case TC_ACT_SHOT:
2190 case TC_ACT_STOLEN:
2191 kfree_skb(skb);
2192 return NULL;
2193 }
2194
2195out:
2196 skb->tc_verd = 0;
2197 return skb;
2198}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199#endif
2200
Patrick McHardybc1d0412008-07-14 22:49:30 -07002201/*
2202 * netif_nit_deliver - deliver received packets to network taps
2203 * @skb: buffer
2204 *
2205 * This function is used to deliver incoming packets to network
2206 * taps. It should be used when the normal netif_receive_skb path
2207 * is bypassed, for example because of VLAN acceleration.
2208 */
2209void netif_nit_deliver(struct sk_buff *skb)
2210{
2211 struct packet_type *ptype;
2212
2213 if (list_empty(&ptype_all))
2214 return;
2215
2216 skb_reset_network_header(skb);
2217 skb_reset_transport_header(skb);
2218 skb->mac_len = skb->network_header - skb->mac_header;
2219
2220 rcu_read_lock();
2221 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2222 if (!ptype->dev || ptype->dev == skb->dev)
2223 deliver_skb(skb, ptype, skb->dev);
2224 }
2225 rcu_read_unlock();
2226}
2227
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002228/**
2229 * netif_receive_skb - process receive buffer from network
2230 * @skb: buffer to process
2231 *
2232 * netif_receive_skb() is the main receive data processing function.
2233 * It always succeeds. The buffer may be dropped during processing
2234 * for congestion control or by the protocol layers.
2235 *
2236 * This function may only be called from softirq context and interrupts
2237 * should be enabled.
2238 *
2239 * Return values (usually ignored):
2240 * NET_RX_SUCCESS: no congestion
2241 * NET_RX_DROP: packet was dropped
2242 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243int netif_receive_skb(struct sk_buff *skb)
2244{
2245 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002246 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002247 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002249 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002251 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2252 return NET_RX_SUCCESS;
2253
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002255 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 return NET_RX_DROP;
2257
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002258 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002259 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
Patrick McHardyc01003c2007-03-29 11:46:52 -07002261 if (!skb->iif)
2262 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002263
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002264 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002265 orig_dev = skb->dev;
2266 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002267 if (skb_bond_should_drop(skb))
2268 null_or_orig = orig_dev; /* deliver only exact match */
2269 else
2270 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002271 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002272
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 __get_cpu_var(netdev_rx_stat).total++;
2274
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002275 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002276 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002277 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
2279 pt_prev = NULL;
2280
2281 rcu_read_lock();
2282
2283#ifdef CONFIG_NET_CLS_ACT
2284 if (skb->tc_verd & TC_NCLS) {
2285 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2286 goto ncls;
2287 }
2288#endif
2289
2290 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002291 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2292 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002293 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002294 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 pt_prev = ptype;
2296 }
2297 }
2298
2299#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002300 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2301 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303ncls:
2304#endif
2305
Stephen Hemminger6229e362007-03-21 13:38:47 -07002306 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2307 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002309 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2310 if (!skb)
2311 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312
Herbert Xu9a279bc2009-02-04 16:55:27 -08002313 skb_orphan(skb);
2314
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002316 list_for_each_entry_rcu(ptype,
2317 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002319 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2320 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002321 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002322 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323 pt_prev = ptype;
2324 }
2325 }
2326
2327 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002328 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329 } else {
2330 kfree_skb(skb);
2331 /* Jamal, now you will not able to escape explaining
2332 * me how you were going to use this. :-)
2333 */
2334 ret = NET_RX_DROP;
2335 }
2336
2337out:
2338 rcu_read_unlock();
2339 return ret;
2340}
2341
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002342/* Network device is going away, flush any packets still pending */
2343static void flush_backlog(void *arg)
2344{
2345 struct net_device *dev = arg;
2346 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2347 struct sk_buff *skb, *tmp;
2348
2349 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2350 if (skb->dev == dev) {
2351 __skb_unlink(skb, &queue->input_pkt_queue);
2352 kfree_skb(skb);
2353 }
2354}
2355
Herbert Xud565b0a2008-12-15 23:38:52 -08002356static int napi_gro_complete(struct sk_buff *skb)
2357{
2358 struct packet_type *ptype;
2359 __be16 type = skb->protocol;
2360 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2361 int err = -ENOENT;
2362
Herbert Xufc59f9a2009-04-14 15:11:06 -07002363 if (NAPI_GRO_CB(skb)->count == 1) {
2364 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002365 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002366 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002367
2368 rcu_read_lock();
2369 list_for_each_entry_rcu(ptype, head, list) {
2370 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2371 continue;
2372
2373 err = ptype->gro_complete(skb);
2374 break;
2375 }
2376 rcu_read_unlock();
2377
2378 if (err) {
2379 WARN_ON(&ptype->list == head);
2380 kfree_skb(skb);
2381 return NET_RX_SUCCESS;
2382 }
2383
2384out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002385 return netif_receive_skb(skb);
2386}
2387
2388void napi_gro_flush(struct napi_struct *napi)
2389{
2390 struct sk_buff *skb, *next;
2391
2392 for (skb = napi->gro_list; skb; skb = next) {
2393 next = skb->next;
2394 skb->next = NULL;
2395 napi_gro_complete(skb);
2396 }
2397
Herbert Xu4ae55442009-02-08 18:00:36 +00002398 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002399 napi->gro_list = NULL;
2400}
2401EXPORT_SYMBOL(napi_gro_flush);
2402
Herbert Xu96e93ea2009-01-06 10:49:34 -08002403int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002404{
2405 struct sk_buff **pp = NULL;
2406 struct packet_type *ptype;
2407 __be16 type = skb->protocol;
2408 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002409 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002410 int mac_len;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002411 int ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002412
2413 if (!(skb->dev->features & NETIF_F_GRO))
2414 goto normal;
2415
David S. Miller4cf704f2009-06-09 00:18:51 -07002416 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002417 goto normal;
2418
Herbert Xud565b0a2008-12-15 23:38:52 -08002419 rcu_read_lock();
2420 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002421 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2422 continue;
2423
Herbert Xu86911732009-01-29 14:19:50 +00002424 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002425 mac_len = skb->network_header - skb->mac_header;
2426 skb->mac_len = mac_len;
2427 NAPI_GRO_CB(skb)->same_flow = 0;
2428 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002429 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002430
Herbert Xud565b0a2008-12-15 23:38:52 -08002431 pp = ptype->gro_receive(&napi->gro_list, skb);
2432 break;
2433 }
2434 rcu_read_unlock();
2435
2436 if (&ptype->list == head)
2437 goto normal;
2438
Herbert Xu0da2afd52008-12-26 14:57:42 -08002439 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002440 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002441
Herbert Xud565b0a2008-12-15 23:38:52 -08002442 if (pp) {
2443 struct sk_buff *nskb = *pp;
2444
2445 *pp = nskb->next;
2446 nskb->next = NULL;
2447 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002448 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002449 }
2450
Herbert Xu0da2afd52008-12-26 14:57:42 -08002451 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002452 goto ok;
2453
Herbert Xu4ae55442009-02-08 18:00:36 +00002454 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002455 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002456
Herbert Xu4ae55442009-02-08 18:00:36 +00002457 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002458 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002459 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002460 skb->next = napi->gro_list;
2461 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002462 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002463
Herbert Xuad0f9902009-02-01 01:24:55 -08002464pull:
Herbert Xucb189782009-05-26 18:50:31 +00002465 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2466 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2467
2468 BUG_ON(skb->end - skb->tail < grow);
2469
2470 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2471
2472 skb->tail += grow;
2473 skb->data_len -= grow;
2474
2475 skb_shinfo(skb)->frags[0].page_offset += grow;
2476 skb_shinfo(skb)->frags[0].size -= grow;
2477
2478 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2479 put_page(skb_shinfo(skb)->frags[0].page);
2480 memmove(skb_shinfo(skb)->frags,
2481 skb_shinfo(skb)->frags + 1,
2482 --skb_shinfo(skb)->nr_frags);
2483 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002484 }
2485
Herbert Xud565b0a2008-12-15 23:38:52 -08002486ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002487 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002488
2489normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002490 ret = GRO_NORMAL;
2491 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002492}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002493EXPORT_SYMBOL(dev_gro_receive);
2494
2495static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2496{
2497 struct sk_buff *p;
2498
Herbert Xud1c76af2009-03-16 10:50:02 -07002499 if (netpoll_rx_on(skb))
2500 return GRO_NORMAL;
2501
Herbert Xu96e93ea2009-01-06 10:49:34 -08002502 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002503 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2504 && !compare_ether_header(skb_mac_header(p),
2505 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002506 NAPI_GRO_CB(p)->flush = 0;
2507 }
2508
2509 return dev_gro_receive(napi, skb);
2510}
Herbert Xu5d38a072009-01-04 16:13:40 -08002511
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002512int napi_skb_finish(int ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002513{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002514 int err = NET_RX_SUCCESS;
2515
2516 switch (ret) {
2517 case GRO_NORMAL:
Herbert Xu5d38a072009-01-04 16:13:40 -08002518 return netif_receive_skb(skb);
2519
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002520 case GRO_DROP:
2521 err = NET_RX_DROP;
2522 /* fall through */
2523
2524 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002525 kfree_skb(skb);
2526 break;
2527 }
2528
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002529 return err;
2530}
2531EXPORT_SYMBOL(napi_skb_finish);
2532
Herbert Xu78a478d2009-05-26 18:50:21 +00002533void skb_gro_reset_offset(struct sk_buff *skb)
2534{
2535 NAPI_GRO_CB(skb)->data_offset = 0;
2536 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002537 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002538
Herbert Xu78d3fd02009-05-26 18:50:23 +00002539 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002540 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002541 NAPI_GRO_CB(skb)->frag0 =
2542 page_address(skb_shinfo(skb)->frags[0].page) +
2543 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002544 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2545 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002546}
2547EXPORT_SYMBOL(skb_gro_reset_offset);
2548
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002549int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2550{
Herbert Xu86911732009-01-29 14:19:50 +00002551 skb_gro_reset_offset(skb);
2552
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002553 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002554}
2555EXPORT_SYMBOL(napi_gro_receive);
2556
Herbert Xu96e93ea2009-01-06 10:49:34 -08002557void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2558{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002559 __skb_pull(skb, skb_headlen(skb));
2560 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2561
2562 napi->skb = skb;
2563}
2564EXPORT_SYMBOL(napi_reuse_skb);
2565
Herbert Xu76620aa2009-04-16 02:02:07 -07002566struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002567{
2568 struct net_device *dev = napi->dev;
2569 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002570
2571 if (!skb) {
2572 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2573 if (!skb)
2574 goto out;
2575
2576 skb_reserve(skb, NET_IP_ALIGN);
Herbert Xu76620aa2009-04-16 02:02:07 -07002577
2578 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002579 }
2580
Herbert Xu96e93ea2009-01-06 10:49:34 -08002581out:
2582 return skb;
2583}
Herbert Xu76620aa2009-04-16 02:02:07 -07002584EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002585
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002586int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2587{
2588 int err = NET_RX_SUCCESS;
2589
2590 switch (ret) {
2591 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002592 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002593 skb->protocol = eth_type_trans(skb, napi->dev);
2594
2595 if (ret == GRO_NORMAL)
2596 return netif_receive_skb(skb);
2597
2598 skb_gro_pull(skb, -ETH_HLEN);
2599 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002600
2601 case GRO_DROP:
2602 err = NET_RX_DROP;
2603 /* fall through */
2604
2605 case GRO_MERGED_FREE:
2606 napi_reuse_skb(napi, skb);
2607 break;
2608 }
2609
2610 return err;
2611}
2612EXPORT_SYMBOL(napi_frags_finish);
2613
Herbert Xu76620aa2009-04-16 02:02:07 -07002614struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002615{
Herbert Xu76620aa2009-04-16 02:02:07 -07002616 struct sk_buff *skb = napi->skb;
2617 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002618 unsigned int hlen;
2619 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002620
2621 napi->skb = NULL;
2622
2623 skb_reset_mac_header(skb);
2624 skb_gro_reset_offset(skb);
2625
Herbert Xua5b1cf22009-05-26 18:50:28 +00002626 off = skb_gro_offset(skb);
2627 hlen = off + sizeof(*eth);
2628 eth = skb_gro_header_fast(skb, off);
2629 if (skb_gro_header_hard(skb, hlen)) {
2630 eth = skb_gro_header_slow(skb, hlen, off);
2631 if (unlikely(!eth)) {
2632 napi_reuse_skb(napi, skb);
2633 skb = NULL;
2634 goto out;
2635 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002636 }
2637
2638 skb_gro_pull(skb, sizeof(*eth));
2639
2640 /*
2641 * This works because the only protocols we care about don't require
2642 * special handling. We'll fix it up properly at the end.
2643 */
2644 skb->protocol = eth->h_proto;
2645
2646out:
2647 return skb;
2648}
2649EXPORT_SYMBOL(napi_frags_skb);
2650
2651int napi_gro_frags(struct napi_struct *napi)
2652{
2653 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002654
2655 if (!skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002656 return NET_RX_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002657
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002658 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002659}
2660EXPORT_SYMBOL(napi_gro_frags);
2661
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002662static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663{
2664 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2666 unsigned long start_time = jiffies;
2667
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002668 napi->weight = weight_p;
2669 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671
2672 local_irq_disable();
2673 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002674 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002675 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002676 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002677 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002678 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 local_irq_enable();
2680
Herbert Xu8f1ead22009-03-26 00:59:10 -07002681 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002682 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002684 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685}
2686
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002687/**
2688 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002689 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002690 *
2691 * The entry's receive function will be scheduled to run
2692 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002693void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002694{
2695 unsigned long flags;
2696
2697 local_irq_save(flags);
2698 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2699 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2700 local_irq_restore(flags);
2701}
2702EXPORT_SYMBOL(__napi_schedule);
2703
Herbert Xud565b0a2008-12-15 23:38:52 -08002704void __napi_complete(struct napi_struct *n)
2705{
2706 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2707 BUG_ON(n->gro_list);
2708
2709 list_del(&n->poll_list);
2710 smp_mb__before_clear_bit();
2711 clear_bit(NAPI_STATE_SCHED, &n->state);
2712}
2713EXPORT_SYMBOL(__napi_complete);
2714
2715void napi_complete(struct napi_struct *n)
2716{
2717 unsigned long flags;
2718
2719 /*
2720 * don't let napi dequeue from the cpu poll list
2721 * just in case its running on a different cpu
2722 */
2723 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2724 return;
2725
2726 napi_gro_flush(n);
2727 local_irq_save(flags);
2728 __napi_complete(n);
2729 local_irq_restore(flags);
2730}
2731EXPORT_SYMBOL(napi_complete);
2732
2733void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2734 int (*poll)(struct napi_struct *, int), int weight)
2735{
2736 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002737 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002738 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002739 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002740 napi->poll = poll;
2741 napi->weight = weight;
2742 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002743 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002744#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002745 spin_lock_init(&napi->poll_lock);
2746 napi->poll_owner = -1;
2747#endif
2748 set_bit(NAPI_STATE_SCHED, &napi->state);
2749}
2750EXPORT_SYMBOL(netif_napi_add);
2751
2752void netif_napi_del(struct napi_struct *napi)
2753{
2754 struct sk_buff *skb, *next;
2755
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002756 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002757 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002758
2759 for (skb = napi->gro_list; skb; skb = next) {
2760 next = skb->next;
2761 skb->next = NULL;
2762 kfree_skb(skb);
2763 }
2764
2765 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002766 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002767}
2768EXPORT_SYMBOL(netif_napi_del);
2769
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002770
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771static void net_rx_action(struct softirq_action *h)
2772{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002773 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002774 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002775 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002776 void *have;
2777
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 local_irq_disable();
2779
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002780 while (!list_empty(list)) {
2781 struct napi_struct *n;
2782 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002784 /* If softirq window is exhuasted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002785 * Allow this to run for 2 jiffies since which will allow
2786 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002787 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002788 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 goto softnet_break;
2790
2791 local_irq_enable();
2792
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002793 /* Even though interrupts have been re-enabled, this
2794 * access is safe because interrupts can only add new
2795 * entries to the tail of this list, and only ->poll()
2796 * calls can remove this head entry from the list.
2797 */
2798 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002800 have = netpoll_poll_lock(n);
2801
2802 weight = n->weight;
2803
David S. Miller0a7606c2007-10-29 21:28:47 -07002804 /* This NAPI_STATE_SCHED test is for avoiding a race
2805 * with netpoll's poll_napi(). Only the entity which
2806 * obtains the lock and sees NAPI_STATE_SCHED set will
2807 * actually make the ->poll() call. Therefore we avoid
2808 * accidently calling ->poll() when NAPI is not scheduled.
2809 */
2810 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002811 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002812 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002813 trace_napi_poll(n);
2814 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002815
2816 WARN_ON_ONCE(work > weight);
2817
2818 budget -= work;
2819
2820 local_irq_disable();
2821
2822 /* Drivers must not modify the NAPI state if they
2823 * consume the entire weight. In such cases this code
2824 * still "owns" the NAPI instance and therefore can
2825 * move the instance around on the list at-will.
2826 */
David S. Millerfed17f32008-01-07 21:00:40 -08002827 if (unlikely(work == weight)) {
2828 if (unlikely(napi_disable_pending(n)))
2829 __napi_complete(n);
2830 else
2831 list_move_tail(&n->poll_list, list);
2832 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002833
2834 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835 }
2836out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002837 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002838
Chris Leechdb217332006-06-17 21:24:58 -07002839#ifdef CONFIG_NET_DMA
2840 /*
2841 * There may not be any more sk_buffs coming right now, so push
2842 * any pending DMA copies to hardware
2843 */
Dan Williams2ba05622009-01-06 11:38:14 -07002844 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002845#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002846
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 return;
2848
2849softnet_break:
2850 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2851 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2852 goto out;
2853}
2854
2855static gifconf_func_t * gifconf_list [NPROTO];
2856
2857/**
2858 * register_gifconf - register a SIOCGIF handler
2859 * @family: Address family
2860 * @gifconf: Function handler
2861 *
2862 * Register protocol dependent address dumping routines. The handler
2863 * that is passed must not be freed or reused until it has been replaced
2864 * by another handler.
2865 */
2866int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2867{
2868 if (family >= NPROTO)
2869 return -EINVAL;
2870 gifconf_list[family] = gifconf;
2871 return 0;
2872}
2873
2874
2875/*
2876 * Map an interface index to its name (SIOCGIFNAME)
2877 */
2878
2879/*
2880 * We need this ioctl for efficient implementation of the
2881 * if_indextoname() function required by the IPv6 API. Without
2882 * it, we would have to search all the interfaces to find a
2883 * match. --pb
2884 */
2885
Eric W. Biederman881d9662007-09-17 11:56:21 -07002886static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887{
2888 struct net_device *dev;
2889 struct ifreq ifr;
2890
2891 /*
2892 * Fetch the caller's info block.
2893 */
2894
2895 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2896 return -EFAULT;
2897
2898 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002899 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 if (!dev) {
2901 read_unlock(&dev_base_lock);
2902 return -ENODEV;
2903 }
2904
2905 strcpy(ifr.ifr_name, dev->name);
2906 read_unlock(&dev_base_lock);
2907
2908 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2909 return -EFAULT;
2910 return 0;
2911}
2912
2913/*
2914 * Perform a SIOCGIFCONF call. This structure will change
2915 * size eventually, and there is nothing I can do about it.
2916 * Thus we will need a 'compatibility mode'.
2917 */
2918
Eric W. Biederman881d9662007-09-17 11:56:21 -07002919static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920{
2921 struct ifconf ifc;
2922 struct net_device *dev;
2923 char __user *pos;
2924 int len;
2925 int total;
2926 int i;
2927
2928 /*
2929 * Fetch the caller's info block.
2930 */
2931
2932 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2933 return -EFAULT;
2934
2935 pos = ifc.ifc_buf;
2936 len = ifc.ifc_len;
2937
2938 /*
2939 * Loop over the interfaces, and write an info block for each.
2940 */
2941
2942 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002943 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 for (i = 0; i < NPROTO; i++) {
2945 if (gifconf_list[i]) {
2946 int done;
2947 if (!pos)
2948 done = gifconf_list[i](dev, NULL, 0);
2949 else
2950 done = gifconf_list[i](dev, pos + total,
2951 len - total);
2952 if (done < 0)
2953 return -EFAULT;
2954 total += done;
2955 }
2956 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002957 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002958
2959 /*
2960 * All done. Write the updated control block back to the caller.
2961 */
2962 ifc.ifc_len = total;
2963
2964 /*
2965 * Both BSD and Solaris return 0 here, so we do too.
2966 */
2967 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2968}
2969
2970#ifdef CONFIG_PROC_FS
2971/*
2972 * This is invoked by the /proc filesystem handler to display a device
2973 * in detail.
2974 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08002976 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977{
Denis V. Luneve372c412007-11-19 22:31:54 -08002978 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002979 loff_t off;
2980 struct net_device *dev;
2981
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07002983 if (!*pos)
2984 return SEQ_START_TOKEN;
2985
2986 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002987 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07002988 if (off++ == *pos)
2989 return dev;
2990
2991 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992}
2993
2994void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2995{
Denis V. Luneve372c412007-11-19 22:31:54 -08002996 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07002998 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07002999 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000}
3001
3002void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08003003 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004{
3005 read_unlock(&dev_base_lock);
3006}
3007
3008static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3009{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003010 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
Rusty Russell5a1b5892007-04-28 21:04:03 -07003012 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3013 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3014 dev->name, stats->rx_bytes, stats->rx_packets,
3015 stats->rx_errors,
3016 stats->rx_dropped + stats->rx_missed_errors,
3017 stats->rx_fifo_errors,
3018 stats->rx_length_errors + stats->rx_over_errors +
3019 stats->rx_crc_errors + stats->rx_frame_errors,
3020 stats->rx_compressed, stats->multicast,
3021 stats->tx_bytes, stats->tx_packets,
3022 stats->tx_errors, stats->tx_dropped,
3023 stats->tx_fifo_errors, stats->collisions,
3024 stats->tx_carrier_errors +
3025 stats->tx_aborted_errors +
3026 stats->tx_window_errors +
3027 stats->tx_heartbeat_errors,
3028 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029}
3030
3031/*
3032 * Called from the PROCfs module. This now uses the new arbitrary sized
3033 * /proc/net interface to create /proc/net/dev
3034 */
3035static int dev_seq_show(struct seq_file *seq, void *v)
3036{
3037 if (v == SEQ_START_TOKEN)
3038 seq_puts(seq, "Inter-| Receive "
3039 " | Transmit\n"
3040 " face |bytes packets errs drop fifo frame "
3041 "compressed multicast|bytes packets errs "
3042 "drop fifo colls carrier compressed\n");
3043 else
3044 dev_seq_printf_stats(seq, v);
3045 return 0;
3046}
3047
3048static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3049{
3050 struct netif_rx_stats *rc = NULL;
3051
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003052 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003053 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054 rc = &per_cpu(netdev_rx_stat, *pos);
3055 break;
3056 } else
3057 ++*pos;
3058 return rc;
3059}
3060
3061static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3062{
3063 return softnet_get_online(pos);
3064}
3065
3066static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3067{
3068 ++*pos;
3069 return softnet_get_online(pos);
3070}
3071
3072static void softnet_seq_stop(struct seq_file *seq, void *v)
3073{
3074}
3075
3076static int softnet_seq_show(struct seq_file *seq, void *v)
3077{
3078 struct netif_rx_stats *s = v;
3079
3080 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003081 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003082 0, 0, 0, 0, /* was fastroute */
3083 s->cpu_collision );
Linus Torvalds1da177e2005-04-16 15:20:36 -07003084 return 0;
3085}
3086
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003087static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 .start = dev_seq_start,
3089 .next = dev_seq_next,
3090 .stop = dev_seq_stop,
3091 .show = dev_seq_show,
3092};
3093
3094static int dev_seq_open(struct inode *inode, struct file *file)
3095{
Denis V. Luneve372c412007-11-19 22:31:54 -08003096 return seq_open_net(inode, file, &dev_seq_ops,
3097 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098}
3099
Arjan van de Ven9a321442007-02-12 00:55:35 -08003100static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003101 .owner = THIS_MODULE,
3102 .open = dev_seq_open,
3103 .read = seq_read,
3104 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003105 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106};
3107
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003108static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109 .start = softnet_seq_start,
3110 .next = softnet_seq_next,
3111 .stop = softnet_seq_stop,
3112 .show = softnet_seq_show,
3113};
3114
3115static int softnet_seq_open(struct inode *inode, struct file *file)
3116{
3117 return seq_open(file, &softnet_seq_ops);
3118}
3119
Arjan van de Ven9a321442007-02-12 00:55:35 -08003120static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003121 .owner = THIS_MODULE,
3122 .open = softnet_seq_open,
3123 .read = seq_read,
3124 .llseek = seq_lseek,
3125 .release = seq_release,
3126};
3127
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003128static void *ptype_get_idx(loff_t pos)
3129{
3130 struct packet_type *pt = NULL;
3131 loff_t i = 0;
3132 int t;
3133
3134 list_for_each_entry_rcu(pt, &ptype_all, list) {
3135 if (i == pos)
3136 return pt;
3137 ++i;
3138 }
3139
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003140 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003141 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3142 if (i == pos)
3143 return pt;
3144 ++i;
3145 }
3146 }
3147 return NULL;
3148}
3149
3150static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003151 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003152{
3153 rcu_read_lock();
3154 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3155}
3156
3157static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3158{
3159 struct packet_type *pt;
3160 struct list_head *nxt;
3161 int hash;
3162
3163 ++*pos;
3164 if (v == SEQ_START_TOKEN)
3165 return ptype_get_idx(0);
3166
3167 pt = v;
3168 nxt = pt->list.next;
3169 if (pt->type == htons(ETH_P_ALL)) {
3170 if (nxt != &ptype_all)
3171 goto found;
3172 hash = 0;
3173 nxt = ptype_base[0].next;
3174 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003175 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003176
3177 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003178 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003179 return NULL;
3180 nxt = ptype_base[hash].next;
3181 }
3182found:
3183 return list_entry(nxt, struct packet_type, list);
3184}
3185
3186static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003187 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003188{
3189 rcu_read_unlock();
3190}
3191
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003192static int ptype_seq_show(struct seq_file *seq, void *v)
3193{
3194 struct packet_type *pt = v;
3195
3196 if (v == SEQ_START_TOKEN)
3197 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003198 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003199 if (pt->type == htons(ETH_P_ALL))
3200 seq_puts(seq, "ALL ");
3201 else
3202 seq_printf(seq, "%04x", ntohs(pt->type));
3203
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003204 seq_printf(seq, " %-8s %pF\n",
3205 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003206 }
3207
3208 return 0;
3209}
3210
3211static const struct seq_operations ptype_seq_ops = {
3212 .start = ptype_seq_start,
3213 .next = ptype_seq_next,
3214 .stop = ptype_seq_stop,
3215 .show = ptype_seq_show,
3216};
3217
3218static int ptype_seq_open(struct inode *inode, struct file *file)
3219{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003220 return seq_open_net(inode, file, &ptype_seq_ops,
3221 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003222}
3223
3224static const struct file_operations ptype_seq_fops = {
3225 .owner = THIS_MODULE,
3226 .open = ptype_seq_open,
3227 .read = seq_read,
3228 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003229 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003230};
3231
3232
Pavel Emelyanov46650792007-10-08 20:38:39 -07003233static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234{
3235 int rc = -ENOMEM;
3236
Eric W. Biederman881d9662007-09-17 11:56:21 -07003237 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003238 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003239 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003241 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003242 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003243
Eric W. Biederman881d9662007-09-17 11:56:21 -07003244 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003245 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246 rc = 0;
3247out:
3248 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003249out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003250 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003252 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003254 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255 goto out;
3256}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003257
Pavel Emelyanov46650792007-10-08 20:38:39 -07003258static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003259{
3260 wext_proc_exit(net);
3261
3262 proc_net_remove(net, "ptype");
3263 proc_net_remove(net, "softnet_stat");
3264 proc_net_remove(net, "dev");
3265}
3266
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003267static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003268 .init = dev_proc_net_init,
3269 .exit = dev_proc_net_exit,
3270};
3271
3272static int __init dev_proc_init(void)
3273{
3274 return register_pernet_subsys(&dev_proc_ops);
3275}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276#else
3277#define dev_proc_init() 0
3278#endif /* CONFIG_PROC_FS */
3279
3280
3281/**
3282 * netdev_set_master - set up master/slave pair
3283 * @slave: slave device
3284 * @master: new master device
3285 *
3286 * Changes the master device of the slave. Pass %NULL to break the
3287 * bonding. The caller must hold the RTNL semaphore. On a failure
3288 * a negative errno code is returned. On success the reference counts
3289 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3290 * function returns zero.
3291 */
3292int netdev_set_master(struct net_device *slave, struct net_device *master)
3293{
3294 struct net_device *old = slave->master;
3295
3296 ASSERT_RTNL();
3297
3298 if (master) {
3299 if (old)
3300 return -EBUSY;
3301 dev_hold(master);
3302 }
3303
3304 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003305
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306 synchronize_net();
3307
3308 if (old)
3309 dev_put(old);
3310
3311 if (master)
3312 slave->flags |= IFF_SLAVE;
3313 else
3314 slave->flags &= ~IFF_SLAVE;
3315
3316 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3317 return 0;
3318}
3319
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003320static void dev_change_rx_flags(struct net_device *dev, int flags)
3321{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003322 const struct net_device_ops *ops = dev->netdev_ops;
3323
3324 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3325 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003326}
3327
Wang Chendad9b332008-06-18 01:48:28 -07003328static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003329{
3330 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003331 uid_t uid;
3332 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003333
Patrick McHardy24023452007-07-14 18:51:31 -07003334 ASSERT_RTNL();
3335
Wang Chendad9b332008-06-18 01:48:28 -07003336 dev->flags |= IFF_PROMISC;
3337 dev->promiscuity += inc;
3338 if (dev->promiscuity == 0) {
3339 /*
3340 * Avoid overflow.
3341 * If inc causes overflow, untouch promisc and return error.
3342 */
3343 if (inc < 0)
3344 dev->flags &= ~IFF_PROMISC;
3345 else {
3346 dev->promiscuity -= inc;
3347 printk(KERN_WARNING "%s: promiscuity touches roof, "
3348 "set promiscuity failed, promiscuity feature "
3349 "of device might be broken.\n", dev->name);
3350 return -EOVERFLOW;
3351 }
3352 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003353 if (dev->flags != old_flags) {
3354 printk(KERN_INFO "device %s %s promiscuous mode\n",
3355 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3356 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003357 if (audit_enabled) {
3358 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003359 audit_log(current->audit_context, GFP_ATOMIC,
3360 AUDIT_ANOM_PROMISCUOUS,
3361 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3362 dev->name, (dev->flags & IFF_PROMISC),
3363 (old_flags & IFF_PROMISC),
3364 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003365 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003366 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003367 }
Patrick McHardy24023452007-07-14 18:51:31 -07003368
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003369 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003370 }
Wang Chendad9b332008-06-18 01:48:28 -07003371 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003372}
3373
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374/**
3375 * dev_set_promiscuity - update promiscuity count on a device
3376 * @dev: device
3377 * @inc: modifier
3378 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003379 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003380 * remains above zero the interface remains promiscuous. Once it hits zero
3381 * the device reverts back to normal filtering operation. A negative inc
3382 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003383 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 */
Wang Chendad9b332008-06-18 01:48:28 -07003385int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003386{
3387 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003388 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389
Wang Chendad9b332008-06-18 01:48:28 -07003390 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003391 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003392 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003393 if (dev->flags != old_flags)
3394 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003395 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396}
3397
3398/**
3399 * dev_set_allmulti - update allmulti count on a device
3400 * @dev: device
3401 * @inc: modifier
3402 *
3403 * Add or remove reception of all multicast frames to a device. While the
3404 * count in the device remains above zero the interface remains listening
3405 * to all interfaces. Once it hits zero the device reverts back to normal
3406 * filtering operation. A negative @inc value is used to drop the counter
3407 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003408 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409 */
3410
Wang Chendad9b332008-06-18 01:48:28 -07003411int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412{
3413 unsigned short old_flags = dev->flags;
3414
Patrick McHardy24023452007-07-14 18:51:31 -07003415 ASSERT_RTNL();
3416
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003418 dev->allmulti += inc;
3419 if (dev->allmulti == 0) {
3420 /*
3421 * Avoid overflow.
3422 * If inc causes overflow, untouch allmulti and return error.
3423 */
3424 if (inc < 0)
3425 dev->flags &= ~IFF_ALLMULTI;
3426 else {
3427 dev->allmulti -= inc;
3428 printk(KERN_WARNING "%s: allmulti touches roof, "
3429 "set allmulti failed, allmulti feature of "
3430 "device might be broken.\n", dev->name);
3431 return -EOVERFLOW;
3432 }
3433 }
Patrick McHardy24023452007-07-14 18:51:31 -07003434 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003435 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003436 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003437 }
Wang Chendad9b332008-06-18 01:48:28 -07003438 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003439}
3440
3441/*
3442 * Upload unicast and multicast address lists to device and
3443 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003444 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003445 * are present.
3446 */
3447void __dev_set_rx_mode(struct net_device *dev)
3448{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003449 const struct net_device_ops *ops = dev->netdev_ops;
3450
Patrick McHardy4417da62007-06-27 01:28:10 -07003451 /* dev_open will call this function so the list will stay sane. */
3452 if (!(dev->flags&IFF_UP))
3453 return;
3454
3455 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003456 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003457
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003458 if (ops->ndo_set_rx_mode)
3459 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003460 else {
3461 /* Unicast addresses changes may only happen under the rtnl,
3462 * therefore calling __dev_set_promiscuity here is safe.
3463 */
3464 if (dev->uc_count > 0 && !dev->uc_promisc) {
3465 __dev_set_promiscuity(dev, 1);
3466 dev->uc_promisc = 1;
3467 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3468 __dev_set_promiscuity(dev, -1);
3469 dev->uc_promisc = 0;
3470 }
3471
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003472 if (ops->ndo_set_multicast_list)
3473 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003474 }
3475}
3476
3477void dev_set_rx_mode(struct net_device *dev)
3478{
David S. Millerb9e40852008-07-15 00:15:08 -07003479 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003480 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003481 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003482}
3483
Jiri Pirkof001fde2009-05-05 02:48:28 +00003484/* hw addresses list handling functions */
3485
Jiri Pirkoccffad252009-05-22 23:22:17 +00003486static int __hw_addr_add(struct list_head *list, int *delta,
3487 unsigned char *addr, int addr_len,
3488 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003489{
3490 struct netdev_hw_addr *ha;
3491 int alloc_size;
3492
3493 if (addr_len > MAX_ADDR_LEN)
3494 return -EINVAL;
3495
Jiri Pirkoccffad252009-05-22 23:22:17 +00003496 list_for_each_entry(ha, list, list) {
3497 if (!memcmp(ha->addr, addr, addr_len) &&
3498 ha->type == addr_type) {
3499 ha->refcount++;
3500 return 0;
3501 }
3502 }
3503
3504
Jiri Pirkof001fde2009-05-05 02:48:28 +00003505 alloc_size = sizeof(*ha);
3506 if (alloc_size < L1_CACHE_BYTES)
3507 alloc_size = L1_CACHE_BYTES;
3508 ha = kmalloc(alloc_size, GFP_ATOMIC);
3509 if (!ha)
3510 return -ENOMEM;
3511 memcpy(ha->addr, addr, addr_len);
3512 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003513 ha->refcount = 1;
3514 ha->synced = false;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003515 list_add_tail_rcu(&ha->list, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003516 if (delta)
3517 (*delta)++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003518 return 0;
3519}
3520
3521static void ha_rcu_free(struct rcu_head *head)
3522{
3523 struct netdev_hw_addr *ha;
3524
3525 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3526 kfree(ha);
3527}
3528
Jiri Pirkoccffad252009-05-22 23:22:17 +00003529static int __hw_addr_del(struct list_head *list, int *delta,
3530 unsigned char *addr, int addr_len,
3531 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003532{
3533 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003534
3535 list_for_each_entry(ha, list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003536 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003537 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003538 if (--ha->refcount)
3539 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003540 list_del_rcu(&ha->list);
3541 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003542 if (delta)
3543 (*delta)--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003544 return 0;
3545 }
3546 }
3547 return -ENOENT;
3548}
3549
Jiri Pirkoccffad252009-05-22 23:22:17 +00003550static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta,
3551 struct list_head *from_list, int addr_len,
3552 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003553{
3554 int err;
3555 struct netdev_hw_addr *ha, *ha2;
3556 unsigned char type;
3557
3558 list_for_each_entry(ha, from_list, list) {
3559 type = addr_type ? addr_type : ha->type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003560 err = __hw_addr_add(to_list, to_delta, ha->addr,
3561 addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003562 if (err)
3563 goto unroll;
3564 }
3565 return 0;
3566
3567unroll:
3568 list_for_each_entry(ha2, from_list, list) {
3569 if (ha2 == ha)
3570 break;
3571 type = addr_type ? addr_type : ha2->type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003572 __hw_addr_del(to_list, to_delta, ha2->addr,
3573 addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003574 }
3575 return err;
3576}
3577
Jiri Pirkoccffad252009-05-22 23:22:17 +00003578static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta,
3579 struct list_head *from_list, int addr_len,
3580 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003581{
3582 struct netdev_hw_addr *ha;
3583 unsigned char type;
3584
3585 list_for_each_entry(ha, from_list, list) {
3586 type = addr_type ? addr_type : ha->type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003587 __hw_addr_del(to_list, to_delta, ha->addr,
3588 addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003589 }
3590}
3591
Jiri Pirkoccffad252009-05-22 23:22:17 +00003592static int __hw_addr_sync(struct list_head *to_list, int *to_delta,
3593 struct list_head *from_list, int *from_delta,
3594 int addr_len)
3595{
3596 int err = 0;
3597 struct netdev_hw_addr *ha, *tmp;
3598
3599 list_for_each_entry_safe(ha, tmp, from_list, list) {
3600 if (!ha->synced) {
3601 err = __hw_addr_add(to_list, to_delta, ha->addr,
3602 addr_len, ha->type);
3603 if (err)
3604 break;
3605 ha->synced = true;
3606 ha->refcount++;
3607 } else if (ha->refcount == 1) {
3608 __hw_addr_del(to_list, to_delta, ha->addr,
3609 addr_len, ha->type);
3610 __hw_addr_del(from_list, from_delta, ha->addr,
3611 addr_len, ha->type);
3612 }
3613 }
3614 return err;
3615}
3616
3617static void __hw_addr_unsync(struct list_head *to_list, int *to_delta,
3618 struct list_head *from_list, int *from_delta,
3619 int addr_len)
3620{
3621 struct netdev_hw_addr *ha, *tmp;
3622
3623 list_for_each_entry_safe(ha, tmp, from_list, list) {
3624 if (ha->synced) {
3625 __hw_addr_del(to_list, to_delta, ha->addr,
3626 addr_len, ha->type);
3627 ha->synced = false;
3628 __hw_addr_del(from_list, from_delta, ha->addr,
3629 addr_len, ha->type);
3630 }
3631 }
3632}
3633
3634
Jiri Pirkof001fde2009-05-05 02:48:28 +00003635static void __hw_addr_flush(struct list_head *list)
3636{
3637 struct netdev_hw_addr *ha, *tmp;
3638
3639 list_for_each_entry_safe(ha, tmp, list, list) {
3640 list_del_rcu(&ha->list);
3641 call_rcu(&ha->rcu_head, ha_rcu_free);
3642 }
3643}
3644
3645/* Device addresses handling functions */
3646
3647static void dev_addr_flush(struct net_device *dev)
3648{
3649 /* rtnl_mutex must be held here */
3650
3651 __hw_addr_flush(&dev->dev_addr_list);
3652 dev->dev_addr = NULL;
3653}
3654
3655static int dev_addr_init(struct net_device *dev)
3656{
3657 unsigned char addr[MAX_ADDR_LEN];
3658 struct netdev_hw_addr *ha;
3659 int err;
3660
3661 /* rtnl_mutex must be held here */
3662
3663 INIT_LIST_HEAD(&dev->dev_addr_list);
Eric Dumazet0c279222009-06-08 03:49:24 +00003664 memset(addr, 0, sizeof(addr));
3665 err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003666 NETDEV_HW_ADDR_T_LAN);
3667 if (!err) {
3668 /*
3669 * Get the first (previously created) address from the list
3670 * and set dev_addr pointer to this location.
3671 */
3672 ha = list_first_entry(&dev->dev_addr_list,
3673 struct netdev_hw_addr, list);
3674 dev->dev_addr = ha->addr;
3675 }
3676 return err;
3677}
3678
3679/**
3680 * dev_addr_add - Add a device address
3681 * @dev: device
3682 * @addr: address to add
3683 * @addr_type: address type
3684 *
3685 * Add a device address to the device or increase the reference count if
3686 * it already exists.
3687 *
3688 * The caller must hold the rtnl_mutex.
3689 */
3690int dev_addr_add(struct net_device *dev, unsigned char *addr,
3691 unsigned char addr_type)
3692{
3693 int err;
3694
3695 ASSERT_RTNL();
3696
Jiri Pirkoccffad252009-05-22 23:22:17 +00003697 err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003698 addr_type);
3699 if (!err)
3700 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3701 return err;
3702}
3703EXPORT_SYMBOL(dev_addr_add);
3704
3705/**
3706 * dev_addr_del - Release a device address.
3707 * @dev: device
3708 * @addr: address to delete
3709 * @addr_type: address type
3710 *
3711 * Release reference to a device address and remove it from the device
3712 * if the reference count drops to zero.
3713 *
3714 * The caller must hold the rtnl_mutex.
3715 */
3716int dev_addr_del(struct net_device *dev, unsigned char *addr,
3717 unsigned char addr_type)
3718{
3719 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003720 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003721
3722 ASSERT_RTNL();
3723
Jiri Pirkoccffad252009-05-22 23:22:17 +00003724 /*
3725 * We can not remove the first address from the list because
3726 * dev->dev_addr points to that.
3727 */
3728 ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list);
3729 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3730 return -ENOENT;
3731
3732 err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len,
3733 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003734 if (!err)
3735 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3736 return err;
3737}
3738EXPORT_SYMBOL(dev_addr_del);
3739
3740/**
3741 * dev_addr_add_multiple - Add device addresses from another device
3742 * @to_dev: device to which addresses will be added
3743 * @from_dev: device from which addresses will be added
3744 * @addr_type: address type - 0 means type will be used from from_dev
3745 *
3746 * Add device addresses of the one device to another.
3747 **
3748 * The caller must hold the rtnl_mutex.
3749 */
3750int dev_addr_add_multiple(struct net_device *to_dev,
3751 struct net_device *from_dev,
3752 unsigned char addr_type)
3753{
3754 int err;
3755
3756 ASSERT_RTNL();
3757
3758 if (from_dev->addr_len != to_dev->addr_len)
3759 return -EINVAL;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003760 err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL,
3761 &from_dev->dev_addr_list,
3762 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003763 if (!err)
3764 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3765 return err;
3766}
3767EXPORT_SYMBOL(dev_addr_add_multiple);
3768
3769/**
3770 * dev_addr_del_multiple - Delete device addresses by another device
3771 * @to_dev: device where the addresses will be deleted
3772 * @from_dev: device by which addresses the addresses will be deleted
3773 * @addr_type: address type - 0 means type will used from from_dev
3774 *
3775 * Deletes addresses in to device by the list of addresses in from device.
3776 *
3777 * The caller must hold the rtnl_mutex.
3778 */
3779int dev_addr_del_multiple(struct net_device *to_dev,
3780 struct net_device *from_dev,
3781 unsigned char addr_type)
3782{
3783 ASSERT_RTNL();
3784
3785 if (from_dev->addr_len != to_dev->addr_len)
3786 return -EINVAL;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003787 __hw_addr_del_multiple(&to_dev->dev_addr_list, NULL,
3788 &from_dev->dev_addr_list,
3789 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003790 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3791 return 0;
3792}
3793EXPORT_SYMBOL(dev_addr_del_multiple);
3794
3795/* unicast and multicast addresses handling functions */
3796
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003797int __dev_addr_delete(struct dev_addr_list **list, int *count,
3798 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003799{
3800 struct dev_addr_list *da;
3801
3802 for (; (da = *list) != NULL; list = &da->next) {
3803 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3804 alen == da->da_addrlen) {
3805 if (glbl) {
3806 int old_glbl = da->da_gusers;
3807 da->da_gusers = 0;
3808 if (old_glbl == 0)
3809 break;
3810 }
3811 if (--da->da_users)
3812 return 0;
3813
3814 *list = da->next;
3815 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003816 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003817 return 0;
3818 }
3819 }
3820 return -ENOENT;
3821}
3822
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003823int __dev_addr_add(struct dev_addr_list **list, int *count,
3824 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003825{
3826 struct dev_addr_list *da;
3827
3828 for (da = *list; da != NULL; da = da->next) {
3829 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3830 da->da_addrlen == alen) {
3831 if (glbl) {
3832 int old_glbl = da->da_gusers;
3833 da->da_gusers = 1;
3834 if (old_glbl)
3835 return 0;
3836 }
3837 da->da_users++;
3838 return 0;
3839 }
3840 }
3841
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003842 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003843 if (da == NULL)
3844 return -ENOMEM;
3845 memcpy(da->da_addr, addr, alen);
3846 da->da_addrlen = alen;
3847 da->da_users = 1;
3848 da->da_gusers = glbl ? 1 : 0;
3849 da->next = *list;
3850 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003851 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003852 return 0;
3853}
3854
Patrick McHardy4417da62007-06-27 01:28:10 -07003855/**
3856 * dev_unicast_delete - Release secondary unicast address.
3857 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003858 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07003859 *
3860 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003861 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003862 *
3863 * The caller must hold the rtnl_mutex.
3864 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003865int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003866{
3867 int err;
3868
3869 ASSERT_RTNL();
3870
Jiri Pirkoccffad252009-05-22 23:22:17 +00003871 err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr,
3872 dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003873 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003874 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003875 return err;
3876}
3877EXPORT_SYMBOL(dev_unicast_delete);
3878
3879/**
3880 * dev_unicast_add - add a secondary unicast address
3881 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003882 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07003883 *
3884 * Add a secondary unicast address to the device or increase
3885 * the reference count if it already exists.
3886 *
3887 * The caller must hold the rtnl_mutex.
3888 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003889int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003890{
3891 int err;
3892
3893 ASSERT_RTNL();
3894
Jiri Pirkoccffad252009-05-22 23:22:17 +00003895 err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr,
3896 dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003897 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003898 __dev_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003899 return err;
3900}
3901EXPORT_SYMBOL(dev_unicast_add);
3902
Chris Leeche83a2ea2008-01-31 16:53:23 -08003903int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3904 struct dev_addr_list **from, int *from_count)
3905{
3906 struct dev_addr_list *da, *next;
3907 int err = 0;
3908
3909 da = *from;
3910 while (da != NULL) {
3911 next = da->next;
3912 if (!da->da_synced) {
3913 err = __dev_addr_add(to, to_count,
3914 da->da_addr, da->da_addrlen, 0);
3915 if (err < 0)
3916 break;
3917 da->da_synced = 1;
3918 da->da_users++;
3919 } else if (da->da_users == 1) {
3920 __dev_addr_delete(to, to_count,
3921 da->da_addr, da->da_addrlen, 0);
3922 __dev_addr_delete(from, from_count,
3923 da->da_addr, da->da_addrlen, 0);
3924 }
3925 da = next;
3926 }
3927 return err;
3928}
3929
3930void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3931 struct dev_addr_list **from, int *from_count)
3932{
3933 struct dev_addr_list *da, *next;
3934
3935 da = *from;
3936 while (da != NULL) {
3937 next = da->next;
3938 if (da->da_synced) {
3939 __dev_addr_delete(to, to_count,
3940 da->da_addr, da->da_addrlen, 0);
3941 da->da_synced = 0;
3942 __dev_addr_delete(from, from_count,
3943 da->da_addr, da->da_addrlen, 0);
3944 }
3945 da = next;
3946 }
3947}
3948
3949/**
3950 * dev_unicast_sync - Synchronize device's unicast list to another device
3951 * @to: destination device
3952 * @from: source device
3953 *
3954 * Add newly added addresses to the destination device and release
Jiri Pirkoccffad252009-05-22 23:22:17 +00003955 * addresses that have no users left.
Chris Leeche83a2ea2008-01-31 16:53:23 -08003956 *
3957 * This function is intended to be called from the dev->set_rx_mode
3958 * function of layered software devices.
3959 */
3960int dev_unicast_sync(struct net_device *to, struct net_device *from)
3961{
3962 int err = 0;
3963
Jiri Pirkoccffad252009-05-22 23:22:17 +00003964 ASSERT_RTNL();
3965
3966 if (to->addr_len != from->addr_len)
3967 return -EINVAL;
3968
3969 err = __hw_addr_sync(&to->uc_list, &to->uc_count,
3970 &from->uc_list, &from->uc_count, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003971 if (!err)
3972 __dev_set_rx_mode(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003973 return err;
3974}
3975EXPORT_SYMBOL(dev_unicast_sync);
3976
3977/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08003978 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08003979 * @to: destination device
3980 * @from: source device
3981 *
3982 * Remove all addresses that were added to the destination device by
3983 * dev_unicast_sync(). This function is intended to be called from the
3984 * dev->stop function of layered software devices.
3985 */
3986void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3987{
Jiri Pirkoccffad252009-05-22 23:22:17 +00003988 ASSERT_RTNL();
Chris Leeche83a2ea2008-01-31 16:53:23 -08003989
Jiri Pirkoccffad252009-05-22 23:22:17 +00003990 if (to->addr_len != from->addr_len)
3991 return;
3992
3993 __hw_addr_unsync(&to->uc_list, &to->uc_count,
3994 &from->uc_list, &from->uc_count, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003995 __dev_set_rx_mode(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003996}
3997EXPORT_SYMBOL(dev_unicast_unsync);
3998
Jiri Pirkoccffad252009-05-22 23:22:17 +00003999static void dev_unicast_flush(struct net_device *dev)
4000{
4001 /* rtnl_mutex must be held here */
4002
4003 __hw_addr_flush(&dev->uc_list);
4004 dev->uc_count = 0;
4005}
4006
4007static void dev_unicast_init(struct net_device *dev)
4008{
4009 /* rtnl_mutex must be held here */
4010
4011 INIT_LIST_HEAD(&dev->uc_list);
4012}
4013
4014
Denis Cheng12972622007-07-18 02:12:56 -07004015static void __dev_addr_discard(struct dev_addr_list **list)
4016{
4017 struct dev_addr_list *tmp;
4018
4019 while (*list != NULL) {
4020 tmp = *list;
4021 *list = tmp->next;
4022 if (tmp->da_users > tmp->da_gusers)
4023 printk("__dev_addr_discard: address leakage! "
4024 "da_users=%d\n", tmp->da_users);
4025 kfree(tmp);
4026 }
4027}
4028
Denis Cheng26cc2522007-07-18 02:12:03 -07004029static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004030{
David S. Millerb9e40852008-07-15 00:15:08 -07004031 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004032
Denis Cheng456ad752007-07-18 02:10:54 -07004033 __dev_addr_discard(&dev->mc_list);
4034 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004035
David S. Millerb9e40852008-07-15 00:15:08 -07004036 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004037}
4038
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004039/**
4040 * dev_get_flags - get flags reported to userspace
4041 * @dev: device
4042 *
4043 * Get the combination of flag bits exported through APIs to userspace.
4044 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004045unsigned dev_get_flags(const struct net_device *dev)
4046{
4047 unsigned flags;
4048
4049 flags = (dev->flags & ~(IFF_PROMISC |
4050 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004051 IFF_RUNNING |
4052 IFF_LOWER_UP |
4053 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054 (dev->gflags & (IFF_PROMISC |
4055 IFF_ALLMULTI));
4056
Stefan Rompfb00055a2006-03-20 17:09:11 -08004057 if (netif_running(dev)) {
4058 if (netif_oper_up(dev))
4059 flags |= IFF_RUNNING;
4060 if (netif_carrier_ok(dev))
4061 flags |= IFF_LOWER_UP;
4062 if (netif_dormant(dev))
4063 flags |= IFF_DORMANT;
4064 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065
4066 return flags;
4067}
4068
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004069/**
4070 * dev_change_flags - change device settings
4071 * @dev: device
4072 * @flags: device state flags
4073 *
4074 * Change settings on device based state flags. The flags are
4075 * in the userspace exported format.
4076 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077int dev_change_flags(struct net_device *dev, unsigned flags)
4078{
Thomas Graf7c355f52007-06-05 16:03:03 -07004079 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080 int old_flags = dev->flags;
4081
Patrick McHardy24023452007-07-14 18:51:31 -07004082 ASSERT_RTNL();
4083
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084 /*
4085 * Set the flags on our device.
4086 */
4087
4088 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4089 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4090 IFF_AUTOMEDIA)) |
4091 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4092 IFF_ALLMULTI));
4093
4094 /*
4095 * Load in the correct multicast list now the flags have changed.
4096 */
4097
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004098 if ((old_flags ^ flags) & IFF_MULTICAST)
4099 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004100
Patrick McHardy4417da62007-06-27 01:28:10 -07004101 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102
4103 /*
4104 * Have we downed the interface. We handle IFF_UP ourselves
4105 * according to user attempts to set it, rather than blindly
4106 * setting it.
4107 */
4108
4109 ret = 0;
4110 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4111 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4112
4113 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004114 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004115 }
4116
4117 if (dev->flags & IFF_UP &&
4118 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
4119 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004120 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004121
4122 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4123 int inc = (flags & IFF_PROMISC) ? +1 : -1;
4124 dev->gflags ^= IFF_PROMISC;
4125 dev_set_promiscuity(dev, inc);
4126 }
4127
4128 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4129 is important. Some (broken) drivers set IFF_PROMISC, when
4130 IFF_ALLMULTI is requested not asking us and not reporting.
4131 */
4132 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4133 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
4134 dev->gflags ^= IFF_ALLMULTI;
4135 dev_set_allmulti(dev, inc);
4136 }
4137
Thomas Graf7c355f52007-06-05 16:03:03 -07004138 /* Exclude state transition flags, already notified */
4139 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4140 if (changes)
4141 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142
4143 return ret;
4144}
4145
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004146/**
4147 * dev_set_mtu - Change maximum transfer unit
4148 * @dev: device
4149 * @new_mtu: new transfer unit
4150 *
4151 * Change the maximum transfer size of the network device.
4152 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153int dev_set_mtu(struct net_device *dev, int new_mtu)
4154{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004155 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004156 int err;
4157
4158 if (new_mtu == dev->mtu)
4159 return 0;
4160
4161 /* MTU must be positive. */
4162 if (new_mtu < 0)
4163 return -EINVAL;
4164
4165 if (!netif_device_present(dev))
4166 return -ENODEV;
4167
4168 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004169 if (ops->ndo_change_mtu)
4170 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004171 else
4172 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004173
Linus Torvalds1da177e2005-04-16 15:20:36 -07004174 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004175 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004176 return err;
4177}
4178
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004179/**
4180 * dev_set_mac_address - Change Media Access Control Address
4181 * @dev: device
4182 * @sa: new address
4183 *
4184 * Change the hardware (MAC) address of the device
4185 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4187{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004188 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004189 int err;
4190
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004191 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192 return -EOPNOTSUPP;
4193 if (sa->sa_family != dev->type)
4194 return -EINVAL;
4195 if (!netif_device_present(dev))
4196 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004197 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004199 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200 return err;
4201}
4202
4203/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07004204 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004206static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207{
4208 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004209 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004210
4211 if (!dev)
4212 return -ENODEV;
4213
4214 switch (cmd) {
4215 case SIOCGIFFLAGS: /* Get interface flags */
John Dykstra746e6ad2009-06-11 20:57:21 -07004216 ifr->ifr_flags = (short) dev_get_flags(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217 return 0;
4218
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219 case SIOCGIFMETRIC: /* Get the metric on the interface
4220 (currently unused) */
4221 ifr->ifr_metric = 0;
4222 return 0;
4223
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224 case SIOCGIFMTU: /* Get the MTU of a device */
4225 ifr->ifr_mtu = dev->mtu;
4226 return 0;
4227
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 case SIOCGIFHWADDR:
4229 if (!dev->addr_len)
4230 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4231 else
4232 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4233 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4234 ifr->ifr_hwaddr.sa_family = dev->type;
4235 return 0;
4236
Jeff Garzik14e3e072007-10-08 00:06:32 -07004237 case SIOCGIFSLAVE:
4238 err = -EINVAL;
4239 break;
4240
4241 case SIOCGIFMAP:
4242 ifr->ifr_map.mem_start = dev->mem_start;
4243 ifr->ifr_map.mem_end = dev->mem_end;
4244 ifr->ifr_map.base_addr = dev->base_addr;
4245 ifr->ifr_map.irq = dev->irq;
4246 ifr->ifr_map.dma = dev->dma;
4247 ifr->ifr_map.port = dev->if_port;
4248 return 0;
4249
4250 case SIOCGIFINDEX:
4251 ifr->ifr_ifindex = dev->ifindex;
4252 return 0;
4253
4254 case SIOCGIFTXQLEN:
4255 ifr->ifr_qlen = dev->tx_queue_len;
4256 return 0;
4257
4258 default:
4259 /* dev_ioctl() should ensure this case
4260 * is never reached
4261 */
4262 WARN_ON(1);
4263 err = -EINVAL;
4264 break;
4265
4266 }
4267 return err;
4268}
4269
4270/*
4271 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4272 */
4273static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4274{
4275 int err;
4276 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004277 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004278
4279 if (!dev)
4280 return -ENODEV;
4281
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004282 ops = dev->netdev_ops;
4283
Jeff Garzik14e3e072007-10-08 00:06:32 -07004284 switch (cmd) {
4285 case SIOCSIFFLAGS: /* Set interface flags */
4286 return dev_change_flags(dev, ifr->ifr_flags);
4287
4288 case SIOCSIFMETRIC: /* Set the metric on the interface
4289 (currently unused) */
4290 return -EOPNOTSUPP;
4291
4292 case SIOCSIFMTU: /* Set the MTU of a device */
4293 return dev_set_mtu(dev, ifr->ifr_mtu);
4294
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295 case SIOCSIFHWADDR:
4296 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4297
4298 case SIOCSIFHWBROADCAST:
4299 if (ifr->ifr_hwaddr.sa_family != dev->type)
4300 return -EINVAL;
4301 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4302 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004303 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304 return 0;
4305
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306 case SIOCSIFMAP:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004307 if (ops->ndo_set_config) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308 if (!netif_device_present(dev))
4309 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004310 return ops->ndo_set_config(dev, &ifr->ifr_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 }
4312 return -EOPNOTSUPP;
4313
4314 case SIOCADDMULTI:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004315 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4317 return -EINVAL;
4318 if (!netif_device_present(dev))
4319 return -ENODEV;
4320 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4321 dev->addr_len, 1);
4322
4323 case SIOCDELMULTI:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004324 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4326 return -EINVAL;
4327 if (!netif_device_present(dev))
4328 return -ENODEV;
4329 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4330 dev->addr_len, 1);
4331
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332 case SIOCSIFTXQLEN:
4333 if (ifr->ifr_qlen < 0)
4334 return -EINVAL;
4335 dev->tx_queue_len = ifr->ifr_qlen;
4336 return 0;
4337
4338 case SIOCSIFNAME:
4339 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4340 return dev_change_name(dev, ifr->ifr_newname);
4341
4342 /*
4343 * Unknown or private ioctl
4344 */
4345
4346 default:
4347 if ((cmd >= SIOCDEVPRIVATE &&
4348 cmd <= SIOCDEVPRIVATE + 15) ||
4349 cmd == SIOCBONDENSLAVE ||
4350 cmd == SIOCBONDRELEASE ||
4351 cmd == SIOCBONDSETHWADDR ||
4352 cmd == SIOCBONDSLAVEINFOQUERY ||
4353 cmd == SIOCBONDINFOQUERY ||
4354 cmd == SIOCBONDCHANGEACTIVE ||
4355 cmd == SIOCGMIIPHY ||
4356 cmd == SIOCGMIIREG ||
4357 cmd == SIOCSMIIREG ||
4358 cmd == SIOCBRADDIF ||
4359 cmd == SIOCBRDELIF ||
Patrick Ohlyd24fff22009-02-12 05:03:40 +00004360 cmd == SIOCSHWTSTAMP ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07004361 cmd == SIOCWANDEV) {
4362 err = -EOPNOTSUPP;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004363 if (ops->ndo_do_ioctl) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364 if (netif_device_present(dev))
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004365 err = ops->ndo_do_ioctl(dev, ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366 else
4367 err = -ENODEV;
4368 }
4369 } else
4370 err = -EINVAL;
4371
4372 }
4373 return err;
4374}
4375
4376/*
4377 * This function handles all "interface"-type I/O control requests. The actual
4378 * 'doing' part of this is dev_ifsioc above.
4379 */
4380
4381/**
4382 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004383 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004384 * @cmd: command to issue
4385 * @arg: pointer to a struct ifreq in user space
4386 *
4387 * Issue ioctl functions to devices. This is normally called by the
4388 * user space syscall interfaces but can sometimes be useful for
4389 * other purposes. The return value is the return from the syscall if
4390 * positive or a negative errno code on error.
4391 */
4392
Eric W. Biederman881d9662007-09-17 11:56:21 -07004393int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394{
4395 struct ifreq ifr;
4396 int ret;
4397 char *colon;
4398
4399 /* One special case: SIOCGIFCONF takes ifconf argument
4400 and requires shared lock, because it sleeps writing
4401 to user space.
4402 */
4403
4404 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004405 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004406 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004407 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408 return ret;
4409 }
4410 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004411 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412
4413 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4414 return -EFAULT;
4415
4416 ifr.ifr_name[IFNAMSIZ-1] = 0;
4417
4418 colon = strchr(ifr.ifr_name, ':');
4419 if (colon)
4420 *colon = 0;
4421
4422 /*
4423 * See which interface the caller is talking about.
4424 */
4425
4426 switch (cmd) {
4427 /*
4428 * These ioctl calls:
4429 * - can be done by all.
4430 * - atomic and do not require locking.
4431 * - return a value
4432 */
4433 case SIOCGIFFLAGS:
4434 case SIOCGIFMETRIC:
4435 case SIOCGIFMTU:
4436 case SIOCGIFHWADDR:
4437 case SIOCGIFSLAVE:
4438 case SIOCGIFMAP:
4439 case SIOCGIFINDEX:
4440 case SIOCGIFTXQLEN:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004441 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442 read_lock(&dev_base_lock);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004443 ret = dev_ifsioc_locked(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444 read_unlock(&dev_base_lock);
4445 if (!ret) {
4446 if (colon)
4447 *colon = ':';
4448 if (copy_to_user(arg, &ifr,
4449 sizeof(struct ifreq)))
4450 ret = -EFAULT;
4451 }
4452 return ret;
4453
4454 case SIOCETHTOOL:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004455 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004457 ret = dev_ethtool(net, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004458 rtnl_unlock();
4459 if (!ret) {
4460 if (colon)
4461 *colon = ':';
4462 if (copy_to_user(arg, &ifr,
4463 sizeof(struct ifreq)))
4464 ret = -EFAULT;
4465 }
4466 return ret;
4467
4468 /*
4469 * These ioctl calls:
4470 * - require superuser power.
4471 * - require strict serialization.
4472 * - return a value
4473 */
4474 case SIOCGMIIPHY:
4475 case SIOCGMIIREG:
4476 case SIOCSIFNAME:
4477 if (!capable(CAP_NET_ADMIN))
4478 return -EPERM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004479 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004480 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004481 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004482 rtnl_unlock();
4483 if (!ret) {
4484 if (colon)
4485 *colon = ':';
4486 if (copy_to_user(arg, &ifr,
4487 sizeof(struct ifreq)))
4488 ret = -EFAULT;
4489 }
4490 return ret;
4491
4492 /*
4493 * These ioctl calls:
4494 * - require superuser power.
4495 * - require strict serialization.
4496 * - do not return a value
4497 */
4498 case SIOCSIFFLAGS:
4499 case SIOCSIFMETRIC:
4500 case SIOCSIFMTU:
4501 case SIOCSIFMAP:
4502 case SIOCSIFHWADDR:
4503 case SIOCSIFSLAVE:
4504 case SIOCADDMULTI:
4505 case SIOCDELMULTI:
4506 case SIOCSIFHWBROADCAST:
4507 case SIOCSIFTXQLEN:
4508 case SIOCSMIIREG:
4509 case SIOCBONDENSLAVE:
4510 case SIOCBONDRELEASE:
4511 case SIOCBONDSETHWADDR:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512 case SIOCBONDCHANGEACTIVE:
4513 case SIOCBRADDIF:
4514 case SIOCBRDELIF:
Patrick Ohlyd24fff22009-02-12 05:03:40 +00004515 case SIOCSHWTSTAMP:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004516 if (!capable(CAP_NET_ADMIN))
4517 return -EPERM;
Thomas Grafcabcac02006-01-24 12:46:33 -08004518 /* fall through */
4519 case SIOCBONDSLAVEINFOQUERY:
4520 case SIOCBONDINFOQUERY:
Eric W. Biederman881d9662007-09-17 11:56:21 -07004521 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004522 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004523 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004524 rtnl_unlock();
4525 return ret;
4526
4527 case SIOCGIFMEM:
4528 /* Get the per device memory space. We can add this but
4529 * currently do not support it */
4530 case SIOCSIFMEM:
4531 /* Set the per device memory buffer space.
4532 * Not applicable in our case */
4533 case SIOCSIFLINK:
4534 return -EINVAL;
4535
4536 /*
4537 * Unknown or private ioctl.
4538 */
4539 default:
4540 if (cmd == SIOCWANDEV ||
4541 (cmd >= SIOCDEVPRIVATE &&
4542 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004543 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004545 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546 rtnl_unlock();
4547 if (!ret && copy_to_user(arg, &ifr,
4548 sizeof(struct ifreq)))
4549 ret = -EFAULT;
4550 return ret;
4551 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552 /* Take care of Wireless Extensions */
Johannes Berg295f4a12007-04-26 20:43:56 -07004553 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004554 return wext_handle_ioctl(net, &ifr, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004555 return -EINVAL;
4556 }
4557}
4558
4559
4560/**
4561 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004562 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004563 *
4564 * Returns a suitable unique value for a new device interface
4565 * number. The caller must hold the rtnl semaphore or the
4566 * dev_base_lock to be sure it remains unique.
4567 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004568static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569{
4570 static int ifindex;
4571 for (;;) {
4572 if (++ifindex <= 0)
4573 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004574 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004575 return ifindex;
4576 }
4577}
4578
Linus Torvalds1da177e2005-04-16 15:20:36 -07004579/* Delayed registration/unregisteration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004580static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004582static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004585}
4586
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004587static void rollback_registered(struct net_device *dev)
4588{
4589 BUG_ON(dev_boot_phase);
4590 ASSERT_RTNL();
4591
4592 /* Some devices call without registering for initialization unwind. */
4593 if (dev->reg_state == NETREG_UNINITIALIZED) {
4594 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4595 "was registered\n", dev->name, dev);
4596
4597 WARN_ON(1);
4598 return;
4599 }
4600
4601 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4602
4603 /* If device is running, close it first. */
4604 dev_close(dev);
4605
4606 /* And unlink it from device chain. */
4607 unlist_netdevice(dev);
4608
4609 dev->reg_state = NETREG_UNREGISTERING;
4610
4611 synchronize_net();
4612
4613 /* Shutdown queueing discipline. */
4614 dev_shutdown(dev);
4615
4616
4617 /* Notify protocols, that we are about to destroy
4618 this device. They should clean all the things.
4619 */
4620 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4621
4622 /*
4623 * Flush the unicast and multicast chains
4624 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00004625 dev_unicast_flush(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004626 dev_addr_discard(dev);
4627
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004628 if (dev->netdev_ops->ndo_uninit)
4629 dev->netdev_ops->ndo_uninit(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004630
4631 /* Notifier chain MUST detach us from master device. */
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004632 WARN_ON(dev->master);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004633
4634 /* Remove entries from kobject tree */
4635 netdev_unregister_kobject(dev);
4636
4637 synchronize_net();
4638
4639 dev_put(dev);
4640}
4641
David S. Millere8a04642008-07-17 00:34:19 -07004642static void __netdev_init_queue_locks_one(struct net_device *dev,
4643 struct netdev_queue *dev_queue,
4644 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004645{
4646 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004647 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004648 dev_queue->xmit_lock_owner = -1;
4649}
4650
4651static void netdev_init_queue_locks(struct net_device *dev)
4652{
David S. Millere8a04642008-07-17 00:34:19 -07004653 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4654 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004655}
4656
Herbert Xub63365a2008-10-23 01:11:29 -07004657unsigned long netdev_fix_features(unsigned long features, const char *name)
4658{
4659 /* Fix illegal SG+CSUM combinations. */
4660 if ((features & NETIF_F_SG) &&
4661 !(features & NETIF_F_ALL_CSUM)) {
4662 if (name)
4663 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4664 "checksum feature.\n", name);
4665 features &= ~NETIF_F_SG;
4666 }
4667
4668 /* TSO requires that SG is present as well. */
4669 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4670 if (name)
4671 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4672 "SG feature.\n", name);
4673 features &= ~NETIF_F_TSO;
4674 }
4675
4676 if (features & NETIF_F_UFO) {
4677 if (!(features & NETIF_F_GEN_CSUM)) {
4678 if (name)
4679 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4680 "since no NETIF_F_HW_CSUM feature.\n",
4681 name);
4682 features &= ~NETIF_F_UFO;
4683 }
4684
4685 if (!(features & NETIF_F_SG)) {
4686 if (name)
4687 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4688 "since no NETIF_F_SG feature.\n", name);
4689 features &= ~NETIF_F_UFO;
4690 }
4691 }
4692
4693 return features;
4694}
4695EXPORT_SYMBOL(netdev_fix_features);
4696
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697/**
4698 * register_netdevice - register a network device
4699 * @dev: device to register
4700 *
4701 * Take a completed network device structure and add it to the kernel
4702 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4703 * chain. 0 is returned on success. A negative errno code is returned
4704 * on a failure to set up the device, or if the name is a duplicate.
4705 *
4706 * Callers must hold the rtnl semaphore. You may want
4707 * register_netdev() instead of this.
4708 *
4709 * BUGS:
4710 * The locking appears insufficient to guarantee two parallel registers
4711 * will not get the same name.
4712 */
4713
4714int register_netdevice(struct net_device *dev)
4715{
4716 struct hlist_head *head;
4717 struct hlist_node *p;
4718 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004719 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004720
4721 BUG_ON(dev_boot_phase);
4722 ASSERT_RTNL();
4723
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004724 might_sleep();
4725
Linus Torvalds1da177e2005-04-16 15:20:36 -07004726 /* When net_device's are persistent, this will be fatal. */
4727 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004728 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729
David S. Millerf1f28aa2008-07-15 00:08:33 -07004730 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004731 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004732 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004733
Linus Torvalds1da177e2005-04-16 15:20:36 -07004734 dev->iflink = -1;
4735
4736 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004737 if (dev->netdev_ops->ndo_init) {
4738 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739 if (ret) {
4740 if (ret > 0)
4741 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004742 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743 }
4744 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004745
Linus Torvalds1da177e2005-04-16 15:20:36 -07004746 if (!dev_valid_name(dev->name)) {
4747 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004748 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749 }
4750
Eric W. Biederman881d9662007-09-17 11:56:21 -07004751 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004752 if (dev->iflink == -1)
4753 dev->iflink = dev->ifindex;
4754
4755 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004756 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004757 hlist_for_each(p, head) {
4758 struct net_device *d
4759 = hlist_entry(p, struct net_device, name_hlist);
4760 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4761 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004762 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004763 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004764 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004765
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004766 /* Fix illegal checksum combinations */
4767 if ((dev->features & NETIF_F_HW_CSUM) &&
4768 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4769 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4770 dev->name);
4771 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4772 }
4773
4774 if ((dev->features & NETIF_F_NO_CSUM) &&
4775 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4776 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4777 dev->name);
4778 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4779 }
4780
Herbert Xub63365a2008-10-23 01:11:29 -07004781 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004782
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004783 /* Enable software GSO if SG is supported. */
4784 if (dev->features & NETIF_F_SG)
4785 dev->features |= NETIF_F_GSO;
4786
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004787 netdev_initialize_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004788 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004789 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004790 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004791 dev->reg_state = NETREG_REGISTERED;
4792
Linus Torvalds1da177e2005-04-16 15:20:36 -07004793 /*
4794 * Default initial state at registry is that the
4795 * device is present.
4796 */
4797
4798 set_bit(__LINK_STATE_PRESENT, &dev->state);
4799
Linus Torvalds1da177e2005-04-16 15:20:36 -07004800 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004802 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004803
4804 /* Notify protocols, that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004805 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004806 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004807 if (ret) {
4808 rollback_registered(dev);
4809 dev->reg_state = NETREG_UNREGISTERED;
4810 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004811
4812out:
4813 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004814
4815err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004816 if (dev->netdev_ops->ndo_uninit)
4817 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004818 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004819}
4820
4821/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004822 * init_dummy_netdev - init a dummy network device for NAPI
4823 * @dev: device to init
4824 *
4825 * This takes a network device structure and initialize the minimum
4826 * amount of fields so it can be used to schedule NAPI polls without
4827 * registering a full blown interface. This is to be used by drivers
4828 * that need to tie several hardware interfaces to a single NAPI
4829 * poll scheduler due to HW limitations.
4830 */
4831int init_dummy_netdev(struct net_device *dev)
4832{
4833 /* Clear everything. Note we don't initialize spinlocks
4834 * are they aren't supposed to be taken by any of the
4835 * NAPI code and this dummy netdev is supposed to be
4836 * only ever used for NAPI polls
4837 */
4838 memset(dev, 0, sizeof(struct net_device));
4839
4840 /* make sure we BUG if trying to hit standard
4841 * register/unregister code path
4842 */
4843 dev->reg_state = NETREG_DUMMY;
4844
4845 /* initialize the ref count */
4846 atomic_set(&dev->refcnt, 1);
4847
4848 /* NAPI wants this */
4849 INIT_LIST_HEAD(&dev->napi_list);
4850
4851 /* a dummy interface is started by default */
4852 set_bit(__LINK_STATE_PRESENT, &dev->state);
4853 set_bit(__LINK_STATE_START, &dev->state);
4854
4855 return 0;
4856}
4857EXPORT_SYMBOL_GPL(init_dummy_netdev);
4858
4859
4860/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004861 * register_netdev - register a network device
4862 * @dev: device to register
4863 *
4864 * Take a completed network device structure and add it to the kernel
4865 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4866 * chain. 0 is returned on success. A negative errno code is returned
4867 * on a failure to set up the device, or if the name is a duplicate.
4868 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004869 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004870 * and expands the device name if you passed a format string to
4871 * alloc_netdev.
4872 */
4873int register_netdev(struct net_device *dev)
4874{
4875 int err;
4876
4877 rtnl_lock();
4878
4879 /*
4880 * If the name is a format string the caller wants us to do a
4881 * name allocation.
4882 */
4883 if (strchr(dev->name, '%')) {
4884 err = dev_alloc_name(dev, dev->name);
4885 if (err < 0)
4886 goto out;
4887 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004888
Linus Torvalds1da177e2005-04-16 15:20:36 -07004889 err = register_netdevice(dev);
4890out:
4891 rtnl_unlock();
4892 return err;
4893}
4894EXPORT_SYMBOL(register_netdev);
4895
4896/*
4897 * netdev_wait_allrefs - wait until all references are gone.
4898 *
4899 * This is called when unregistering network devices.
4900 *
4901 * Any protocol or device that holds a reference should register
4902 * for netdevice notification, and cleanup and put back the
4903 * reference if they receive an UNREGISTER event.
4904 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004905 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906 */
4907static void netdev_wait_allrefs(struct net_device *dev)
4908{
4909 unsigned long rebroadcast_time, warning_time;
4910
4911 rebroadcast_time = warning_time = jiffies;
4912 while (atomic_read(&dev->refcnt) != 0) {
4913 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004914 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004915
4916 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004917 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004918
4919 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4920 &dev->state)) {
4921 /* We must not have linkwatch events
4922 * pending on unregister. If this
4923 * happens, we simply run the queue
4924 * unscheduled, resulting in a noop
4925 * for this device.
4926 */
4927 linkwatch_run_queue();
4928 }
4929
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004930 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004931
4932 rebroadcast_time = jiffies;
4933 }
4934
4935 msleep(250);
4936
4937 if (time_after(jiffies, warning_time + 10 * HZ)) {
4938 printk(KERN_EMERG "unregister_netdevice: "
4939 "waiting for %s to become free. Usage "
4940 "count = %d\n",
4941 dev->name, atomic_read(&dev->refcnt));
4942 warning_time = jiffies;
4943 }
4944 }
4945}
4946
4947/* The sequence is:
4948 *
4949 * rtnl_lock();
4950 * ...
4951 * register_netdevice(x1);
4952 * register_netdevice(x2);
4953 * ...
4954 * unregister_netdevice(y1);
4955 * unregister_netdevice(y2);
4956 * ...
4957 * rtnl_unlock();
4958 * free_netdev(y1);
4959 * free_netdev(y2);
4960 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07004961 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004963 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964 * without deadlocking with linkwatch via keventd.
4965 * 2) Since we run with the RTNL semaphore not held, we can sleep
4966 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07004967 *
4968 * We must not return until all unregister events added during
4969 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004970 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004971void netdev_run_todo(void)
4972{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004973 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004974
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004976 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07004977
4978 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07004979
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980 while (!list_empty(&list)) {
4981 struct net_device *dev
4982 = list_entry(list.next, struct net_device, todo_list);
4983 list_del(&dev->todo_list);
4984
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004985 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986 printk(KERN_ERR "network todo '%s' but state %d\n",
4987 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004988 dump_stack();
4989 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004990 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004991
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004992 dev->reg_state = NETREG_UNREGISTERED;
4993
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07004994 on_each_cpu(flush_backlog, dev, 1);
4995
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004996 netdev_wait_allrefs(dev);
4997
4998 /* paranoia */
4999 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005000 WARN_ON(dev->ip_ptr);
5001 WARN_ON(dev->ip6_ptr);
5002 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005003
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005004 if (dev->destructor)
5005 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005006
5007 /* Free network device */
5008 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010}
5011
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005012/**
5013 * dev_get_stats - get network device statistics
5014 * @dev: device to get statistics from
5015 *
5016 * Get network statistics from device. The device driver may provide
5017 * its own method by setting dev->netdev_ops->get_stats; otherwise
5018 * the internal statistics structure is used.
5019 */
5020const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005021{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005022 const struct net_device_ops *ops = dev->netdev_ops;
5023
5024 if (ops->ndo_get_stats)
5025 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005026 else {
5027 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5028 struct net_device_stats *stats = &dev->stats;
5029 unsigned int i;
5030 struct netdev_queue *txq;
5031
5032 for (i = 0; i < dev->num_tx_queues; i++) {
5033 txq = netdev_get_tx_queue(dev, i);
5034 tx_bytes += txq->tx_bytes;
5035 tx_packets += txq->tx_packets;
5036 tx_dropped += txq->tx_dropped;
5037 }
5038 if (tx_bytes || tx_packets || tx_dropped) {
5039 stats->tx_bytes = tx_bytes;
5040 stats->tx_packets = tx_packets;
5041 stats->tx_dropped = tx_dropped;
5042 }
5043 return stats;
5044 }
Rusty Russellc45d2862007-03-28 14:29:08 -07005045}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005046EXPORT_SYMBOL(dev_get_stats);
Rusty Russellc45d2862007-03-28 14:29:08 -07005047
David S. Millerdc2b4842008-07-08 17:18:23 -07005048static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005049 struct netdev_queue *queue,
5050 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005051{
David S. Millerdc2b4842008-07-08 17:18:23 -07005052 queue->dev = dev;
5053}
5054
David S. Millerbb949fb2008-07-08 16:55:56 -07005055static void netdev_init_queues(struct net_device *dev)
5056{
David S. Millere8a04642008-07-17 00:34:19 -07005057 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5058 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005059 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005060}
5061
Linus Torvalds1da177e2005-04-16 15:20:36 -07005062/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005063 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005064 * @sizeof_priv: size of private data to allocate space for
5065 * @name: device name format string
5066 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005067 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068 *
5069 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005070 * and performs basic initialization. Also allocates subquue structs
5071 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005073struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5074 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075{
David S. Millere8a04642008-07-17 00:34:19 -07005076 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005077 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005078 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005079 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005081 BUG_ON(strlen(name) >= sizeof(dev->name));
5082
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005083 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005084 if (sizeof_priv) {
5085 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005086 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005087 alloc_size += sizeof_priv;
5088 }
5089 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005090 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005092 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005094 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095 return NULL;
5096 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005097
Stephen Hemminger79439862008-07-21 13:28:44 -07005098 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005099 if (!tx) {
5100 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5101 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005102 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005103 }
5104
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005105 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005106 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005107
5108 if (dev_addr_init(dev))
5109 goto free_tx;
5110
Jiri Pirkoccffad252009-05-22 23:22:17 +00005111 dev_unicast_init(dev);
5112
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005113 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114
David S. Millere8a04642008-07-17 00:34:19 -07005115 dev->_tx = tx;
5116 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005117 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005118
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005119 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120
David S. Millerbb949fb2008-07-08 16:55:56 -07005121 netdev_init_queues(dev);
5122
Herbert Xud565b0a2008-12-15 23:38:52 -08005123 INIT_LIST_HEAD(&dev->napi_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005124 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005125 setup(dev);
5126 strcpy(dev->name, name);
5127 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005128
5129free_tx:
5130 kfree(tx);
5131
5132free_p:
5133 kfree(p);
5134 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005136EXPORT_SYMBOL(alloc_netdev_mq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005137
5138/**
5139 * free_netdev - free network device
5140 * @dev: device
5141 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005142 * This function does the last stage of destroying an allocated device
5143 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005144 * If this is the last reference then it will be freed.
5145 */
5146void free_netdev(struct net_device *dev)
5147{
Herbert Xud565b0a2008-12-15 23:38:52 -08005148 struct napi_struct *p, *n;
5149
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005150 release_net(dev_net(dev));
5151
David S. Millere8a04642008-07-17 00:34:19 -07005152 kfree(dev->_tx);
5153
Jiri Pirkof001fde2009-05-05 02:48:28 +00005154 /* Flush device addresses */
5155 dev_addr_flush(dev);
5156
Herbert Xud565b0a2008-12-15 23:38:52 -08005157 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5158 netif_napi_del(p);
5159
Stephen Hemminger3041a062006-05-26 13:25:24 -07005160 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161 if (dev->reg_state == NETREG_UNINITIALIZED) {
5162 kfree((char *)dev - dev->padded);
5163 return;
5164 }
5165
5166 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5167 dev->reg_state = NETREG_RELEASED;
5168
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005169 /* will free via device release */
5170 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171}
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005172
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005173/**
5174 * synchronize_net - Synchronize with packet receive processing
5175 *
5176 * Wait for packets currently being received to be done.
5177 * Does not block later packets from starting.
5178 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005179void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180{
5181 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005182 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183}
5184
5185/**
5186 * unregister_netdevice - remove device from the kernel
5187 * @dev: device
5188 *
5189 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005190 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191 *
5192 * Callers must hold the rtnl semaphore. You may want
5193 * unregister_netdev() instead of this.
5194 */
5195
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08005196void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005197{
Herbert Xua6620712007-12-12 19:21:56 -08005198 ASSERT_RTNL();
5199
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005200 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005201 /* Finish processing unregister after unlock */
5202 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203}
5204
5205/**
5206 * unregister_netdev - remove device from the kernel
5207 * @dev: device
5208 *
5209 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005210 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211 *
5212 * This is just a wrapper for unregister_netdevice that takes
5213 * the rtnl semaphore. In general you want to use this and not
5214 * unregister_netdevice.
5215 */
5216void unregister_netdev(struct net_device *dev)
5217{
5218 rtnl_lock();
5219 unregister_netdevice(dev);
5220 rtnl_unlock();
5221}
5222
5223EXPORT_SYMBOL(unregister_netdev);
5224
Eric W. Biedermance286d32007-09-12 13:53:49 +02005225/**
5226 * dev_change_net_namespace - move device to different nethost namespace
5227 * @dev: device
5228 * @net: network namespace
5229 * @pat: If not NULL name pattern to try if the current device name
5230 * is already taken in the destination network namespace.
5231 *
5232 * This function shuts down a device interface and moves it
5233 * to a new network namespace. On success 0 is returned, on
5234 * a failure a netagive errno code is returned.
5235 *
5236 * Callers must hold the rtnl semaphore.
5237 */
5238
5239int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5240{
5241 char buf[IFNAMSIZ];
5242 const char *destname;
5243 int err;
5244
5245 ASSERT_RTNL();
5246
5247 /* Don't allow namespace local devices to be moved. */
5248 err = -EINVAL;
5249 if (dev->features & NETIF_F_NETNS_LOCAL)
5250 goto out;
5251
Eric W. Biederman38918452008-10-27 17:51:47 -07005252#ifdef CONFIG_SYSFS
5253 /* Don't allow real devices to be moved when sysfs
5254 * is enabled.
5255 */
5256 err = -EINVAL;
5257 if (dev->dev.parent)
5258 goto out;
5259#endif
5260
Eric W. Biedermance286d32007-09-12 13:53:49 +02005261 /* Ensure the device has been registrered */
5262 err = -EINVAL;
5263 if (dev->reg_state != NETREG_REGISTERED)
5264 goto out;
5265
5266 /* Get out if there is nothing todo */
5267 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005268 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005269 goto out;
5270
5271 /* Pick the destination device name, and ensure
5272 * we can use it in the destination network namespace.
5273 */
5274 err = -EEXIST;
5275 destname = dev->name;
5276 if (__dev_get_by_name(net, destname)) {
5277 /* We get here if we can't use the current device name */
5278 if (!pat)
5279 goto out;
5280 if (!dev_valid_name(pat))
5281 goto out;
5282 if (strchr(pat, '%')) {
5283 if (__dev_alloc_name(net, pat, buf) < 0)
5284 goto out;
5285 destname = buf;
5286 } else
5287 destname = pat;
5288 if (__dev_get_by_name(net, destname))
5289 goto out;
5290 }
5291
5292 /*
5293 * And now a mini version of register_netdevice unregister_netdevice.
5294 */
5295
5296 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005297 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005298
5299 /* And unlink it from device chain */
5300 err = -ENODEV;
5301 unlist_netdevice(dev);
5302
5303 synchronize_net();
5304
5305 /* Shutdown queueing discipline. */
5306 dev_shutdown(dev);
5307
5308 /* Notify protocols, that we are about to destroy
5309 this device. They should clean all the things.
5310 */
5311 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5312
5313 /*
5314 * Flush the unicast and multicast chains
5315 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00005316 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005317 dev_addr_discard(dev);
5318
Eric W. Biederman38918452008-10-27 17:51:47 -07005319 netdev_unregister_kobject(dev);
5320
Eric W. Biedermance286d32007-09-12 13:53:49 +02005321 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005322 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005323
5324 /* Assign the new device name */
5325 if (destname != dev->name)
5326 strcpy(dev->name, destname);
5327
5328 /* If there is an ifindex conflict assign a new one */
5329 if (__dev_get_by_index(net, dev->ifindex)) {
5330 int iflink = (dev->iflink == dev->ifindex);
5331 dev->ifindex = dev_new_index(net);
5332 if (iflink)
5333 dev->iflink = dev->ifindex;
5334 }
5335
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005336 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005337 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005338 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005339
5340 /* Add the device back in the hashes */
5341 list_netdevice(dev);
5342
5343 /* Notify protocols, that a new device appeared. */
5344 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5345
5346 synchronize_net();
5347 err = 0;
5348out:
5349 return err;
5350}
5351
Linus Torvalds1da177e2005-04-16 15:20:36 -07005352static int dev_cpu_callback(struct notifier_block *nfb,
5353 unsigned long action,
5354 void *ocpu)
5355{
5356 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005357 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005358 struct sk_buff *skb;
5359 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5360 struct softnet_data *sd, *oldsd;
5361
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005362 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005363 return NOTIFY_OK;
5364
5365 local_irq_disable();
5366 cpu = smp_processor_id();
5367 sd = &per_cpu(softnet_data, cpu);
5368 oldsd = &per_cpu(softnet_data, oldcpu);
5369
5370 /* Find end of our completion_queue. */
5371 list_skb = &sd->completion_queue;
5372 while (*list_skb)
5373 list_skb = &(*list_skb)->next;
5374 /* Append completion queue from offline CPU. */
5375 *list_skb = oldsd->completion_queue;
5376 oldsd->completion_queue = NULL;
5377
5378 /* Find end of our output_queue. */
5379 list_net = &sd->output_queue;
5380 while (*list_net)
5381 list_net = &(*list_net)->next_sched;
5382 /* Append output queue from offline CPU. */
5383 *list_net = oldsd->output_queue;
5384 oldsd->output_queue = NULL;
5385
5386 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5387 local_irq_enable();
5388
5389 /* Process offline CPU's input_pkt_queue */
5390 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5391 netif_rx(skb);
5392
5393 return NOTIFY_OK;
5394}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005395
5396
Herbert Xu7f353bf2007-08-10 15:47:58 -07005397/**
Herbert Xub63365a2008-10-23 01:11:29 -07005398 * netdev_increment_features - increment feature set by one
5399 * @all: current feature set
5400 * @one: new feature set
5401 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005402 *
5403 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005404 * @one to the master device with current feature set @all. Will not
5405 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005406 */
Herbert Xub63365a2008-10-23 01:11:29 -07005407unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5408 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005409{
Herbert Xub63365a2008-10-23 01:11:29 -07005410 /* If device needs checksumming, downgrade to it. */
5411 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5412 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5413 else if (mask & NETIF_F_ALL_CSUM) {
5414 /* If one device supports v4/v6 checksumming, set for all. */
5415 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5416 !(all & NETIF_F_GEN_CSUM)) {
5417 all &= ~NETIF_F_ALL_CSUM;
5418 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5419 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005420
Herbert Xub63365a2008-10-23 01:11:29 -07005421 /* If one device supports hw checksumming, set for all. */
5422 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5423 all &= ~NETIF_F_ALL_CSUM;
5424 all |= NETIF_F_HW_CSUM;
5425 }
5426 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005427
Herbert Xub63365a2008-10-23 01:11:29 -07005428 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005429
Herbert Xub63365a2008-10-23 01:11:29 -07005430 one |= all & NETIF_F_ONE_FOR_ALL;
5431 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
5432 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005433
5434 return all;
5435}
Herbert Xub63365a2008-10-23 01:11:29 -07005436EXPORT_SYMBOL(netdev_increment_features);
Herbert Xu7f353bf2007-08-10 15:47:58 -07005437
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005438static struct hlist_head *netdev_create_hash(void)
5439{
5440 int i;
5441 struct hlist_head *hash;
5442
5443 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5444 if (hash != NULL)
5445 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5446 INIT_HLIST_HEAD(&hash[i]);
5447
5448 return hash;
5449}
5450
Eric W. Biederman881d9662007-09-17 11:56:21 -07005451/* Initialize per network namespace state */
Pavel Emelyanov46650792007-10-08 20:38:39 -07005452static int __net_init netdev_init(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005453{
Eric W. Biederman881d9662007-09-17 11:56:21 -07005454 INIT_LIST_HEAD(&net->dev_base_head);
Eric W. Biederman881d9662007-09-17 11:56:21 -07005455
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005456 net->dev_name_head = netdev_create_hash();
5457 if (net->dev_name_head == NULL)
5458 goto err_name;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005459
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005460 net->dev_index_head = netdev_create_hash();
5461 if (net->dev_index_head == NULL)
5462 goto err_idx;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005463
5464 return 0;
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005465
5466err_idx:
5467 kfree(net->dev_name_head);
5468err_name:
5469 return -ENOMEM;
Eric W. Biederman881d9662007-09-17 11:56:21 -07005470}
5471
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005472/**
5473 * netdev_drivername - network driver for the device
5474 * @dev: network device
5475 * @buffer: buffer for resulting name
5476 * @len: size of buffer
5477 *
5478 * Determine network driver for device.
5479 */
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005480char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
Arjan van de Ven6579e572008-07-21 13:31:48 -07005481{
Stephen Hemmingercf04a4c72008-09-30 02:22:14 -07005482 const struct device_driver *driver;
5483 const struct device *parent;
Arjan van de Ven6579e572008-07-21 13:31:48 -07005484
5485 if (len <= 0 || !buffer)
5486 return buffer;
5487 buffer[0] = 0;
5488
5489 parent = dev->dev.parent;
5490
5491 if (!parent)
5492 return buffer;
5493
5494 driver = parent->driver;
5495 if (driver && driver->name)
5496 strlcpy(buffer, driver->name, len);
5497 return buffer;
5498}
5499
Pavel Emelyanov46650792007-10-08 20:38:39 -07005500static void __net_exit netdev_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07005501{
5502 kfree(net->dev_name_head);
5503 kfree(net->dev_index_head);
5504}
5505
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005506static struct pernet_operations __net_initdata netdev_net_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07005507 .init = netdev_init,
5508 .exit = netdev_exit,
5509};
5510
Pavel Emelyanov46650792007-10-08 20:38:39 -07005511static void __net_exit default_device_exit(struct net *net)
Eric W. Biedermance286d32007-09-12 13:53:49 +02005512{
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005513 struct net_device *dev;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005514 /*
5515 * Push all migratable of the network devices back to the
5516 * initial network namespace
5517 */
5518 rtnl_lock();
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005519restart:
5520 for_each_netdev(net, dev) {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005521 int err;
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005522 char fb_name[IFNAMSIZ];
Eric W. Biedermance286d32007-09-12 13:53:49 +02005523
5524 /* Ignore unmoveable devices (i.e. loopback) */
5525 if (dev->features & NETIF_F_NETNS_LOCAL)
5526 continue;
5527
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005528 /* Delete virtual devices */
5529 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
5530 dev->rtnl_link_ops->dellink(dev);
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005531 goto restart;
Eric W. Biedermand0c082c2008-11-05 15:59:38 -08005532 }
5533
Eric W. Biedermance286d32007-09-12 13:53:49 +02005534 /* Push remaing network devices to init_net */
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005535 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5536 err = dev_change_net_namespace(dev, &init_net, fb_name);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005537 if (err) {
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005538 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
Eric W. Biedermance286d32007-09-12 13:53:49 +02005539 __func__, dev->name, err);
Pavel Emelyanovaca51392008-05-08 01:24:25 -07005540 BUG();
Eric W. Biedermance286d32007-09-12 13:53:49 +02005541 }
Eric W. Biederman8eb79862008-12-29 18:21:48 -08005542 goto restart;
Eric W. Biedermance286d32007-09-12 13:53:49 +02005543 }
5544 rtnl_unlock();
5545}
5546
Denis V. Lunev022cbae2007-11-13 03:23:50 -08005547static struct pernet_operations __net_initdata default_device_ops = {
Eric W. Biedermance286d32007-09-12 13:53:49 +02005548 .exit = default_device_exit,
5549};
5550
Linus Torvalds1da177e2005-04-16 15:20:36 -07005551/*
5552 * Initialize the DEV module. At boot time this walks the device list and
5553 * unhooks any devices that fail to initialise (normally hardware not
5554 * present) and leaves us with a valid list of present and active devices.
5555 *
5556 */
5557
5558/*
5559 * This is called single threaded during boot, so no need
5560 * to take the rtnl semaphore.
5561 */
5562static int __init net_dev_init(void)
5563{
5564 int i, rc = -ENOMEM;
5565
5566 BUG_ON(!dev_boot_phase);
5567
Linus Torvalds1da177e2005-04-16 15:20:36 -07005568 if (dev_proc_init())
5569 goto out;
5570
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005571 if (netdev_kobject_init())
Linus Torvalds1da177e2005-04-16 15:20:36 -07005572 goto out;
5573
5574 INIT_LIST_HEAD(&ptype_all);
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08005575 for (i = 0; i < PTYPE_HASH_SIZE; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005576 INIT_LIST_HEAD(&ptype_base[i]);
5577
Eric W. Biederman881d9662007-09-17 11:56:21 -07005578 if (register_pernet_subsys(&netdev_net_ops))
5579 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005580
5581 /*
5582 * Initialise the packet receive queues.
5583 */
5584
KAMEZAWA Hiroyuki6f912042006-04-10 22:52:50 -07005585 for_each_possible_cpu(i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005586 struct softnet_data *queue;
5587
5588 queue = &per_cpu(softnet_data, i);
5589 skb_queue_head_init(&queue->input_pkt_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005590 queue->completion_queue = NULL;
5591 INIT_LIST_HEAD(&queue->poll_list);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07005592
5593 queue->backlog.poll = process_backlog;
5594 queue->backlog.weight = weight_p;
Herbert Xud565b0a2008-12-15 23:38:52 -08005595 queue->backlog.gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00005596 queue->backlog.gro_count = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005597 }
5598
Linus Torvalds1da177e2005-04-16 15:20:36 -07005599 dev_boot_phase = 0;
5600
Eric W. Biederman505d4f72008-11-07 22:54:20 -08005601 /* The loopback device is special if any other network devices
5602 * is present in a network namespace the loopback device must
5603 * be present. Since we now dynamically allocate and free the
5604 * loopback device ensure this invariant is maintained by
5605 * keeping the loopback device as the first device on the
5606 * list of network devices. Ensuring the loopback devices
5607 * is the first device that appears and the last network device
5608 * that disappears.
5609 */
5610 if (register_pernet_device(&loopback_net_ops))
5611 goto out;
5612
5613 if (register_pernet_device(&default_device_ops))
5614 goto out;
5615
Carlos R. Mafra962cf362008-05-15 11:15:37 -03005616 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5617 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005618
5619 hotcpu_notifier(dev_cpu_callback, 0);
5620 dst_init();
5621 dev_mcast_init();
5622 rc = 0;
5623out:
5624 return rc;
5625}
5626
5627subsys_initcall(net_dev_init);
5628
Krishna Kumare88721f2009-02-18 17:55:02 -08005629static int __init initialize_hashrnd(void)
5630{
5631 get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
5632 return 0;
5633}
5634
5635late_initcall_sync(initialize_hashrnd);
5636
Linus Torvalds1da177e2005-04-16 15:20:36 -07005637EXPORT_SYMBOL(__dev_get_by_index);
5638EXPORT_SYMBOL(__dev_get_by_name);
5639EXPORT_SYMBOL(__dev_remove_pack);
Mitch Williamsc2373ee2005-11-09 10:34:45 -08005640EXPORT_SYMBOL(dev_valid_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005641EXPORT_SYMBOL(dev_add_pack);
5642EXPORT_SYMBOL(dev_alloc_name);
5643EXPORT_SYMBOL(dev_close);
5644EXPORT_SYMBOL(dev_get_by_flags);
5645EXPORT_SYMBOL(dev_get_by_index);
5646EXPORT_SYMBOL(dev_get_by_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005647EXPORT_SYMBOL(dev_open);
5648EXPORT_SYMBOL(dev_queue_xmit);
5649EXPORT_SYMBOL(dev_remove_pack);
5650EXPORT_SYMBOL(dev_set_allmulti);
5651EXPORT_SYMBOL(dev_set_promiscuity);
5652EXPORT_SYMBOL(dev_change_flags);
5653EXPORT_SYMBOL(dev_set_mtu);
5654EXPORT_SYMBOL(dev_set_mac_address);
5655EXPORT_SYMBOL(free_netdev);
5656EXPORT_SYMBOL(netdev_boot_setup_check);
5657EXPORT_SYMBOL(netdev_set_master);
5658EXPORT_SYMBOL(netdev_state_change);
5659EXPORT_SYMBOL(netif_receive_skb);
5660EXPORT_SYMBOL(netif_rx);
5661EXPORT_SYMBOL(register_gifconf);
5662EXPORT_SYMBOL(register_netdevice);
5663EXPORT_SYMBOL(register_netdevice_notifier);
5664EXPORT_SYMBOL(skb_checksum_help);
5665EXPORT_SYMBOL(synchronize_net);
5666EXPORT_SYMBOL(unregister_netdevice);
5667EXPORT_SYMBOL(unregister_netdevice_notifier);
5668EXPORT_SYMBOL(net_enable_timestamp);
5669EXPORT_SYMBOL(net_disable_timestamp);
5670EXPORT_SYMBOL(dev_get_flags);
5671
Linus Torvalds1da177e2005-04-16 15:20:36 -07005672EXPORT_SYMBOL(dev_load);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005673
5674EXPORT_PER_CPU_SYMBOL(softnet_data);