/*
 * NET3 Protocol independent device support routines.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Derived from the non IP parts of dev.c 1.0.19
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 * Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 * Changes:
 *		D.J. Barrow	: Fixed bug where dev->refcnt gets set
 *				  to 2 if register_netdev gets called
 *				  before net_dev_init & also removed a
 *				  few lines of code in the process.
 *		Alan Cox	: device private ioctl copies fields back.
 *		Alan Cox	: Transmit queue code does relevant
 *				  stunts to keep the queue safe.
 *		Alan Cox	: Fixed double lock.
 *		Alan Cox	: Fixed promisc NULL pointer trap
 *		????????	: Support the full private ioctl range
 *		Alan Cox	: Moved ioctl permission check into
 *				  drivers
 *		Tim Kordas	: SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	: 100 backlog just doesn't cut it when
 *				  you start doing multicast video 8)
 *		Alan Cox	: Rewrote net_bh and list manager.
 *		Alan Cox	: Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	: Took out transmit every packet pass
 *				  Saved a few bytes in the ioctl handler
 *		Alan Cox	: Network driver sets packet type before
 *				  calling netif_rx. Saves a function
 *				  call a packet.
 *		Alan Cox	: Hashed net_bh()
 *		Richard Kooijman: Timestamp fixes.
 *		Alan Cox	: Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	: Device lock protection.
 *		Alan Cox	: Fixed nasty side effect of device close
 *				  changes.
 *		Rudi Cilibrasi	: Pass the right thing to
 *				  set_mac_address()
 *		Dave Miller	: 32bit quantity for the device lock to
 *				  make it work out on a Sparc.
 *		Bjorn Ekwall	: Added KERNELD hack.
 *		Alan Cox	: Cleaned up the backlog initialise.
 *		Craig Metz	: SIOCGIFCONF fix if space for under
 *				  1 device.
 *		Thomas Bogendoerfer : Return ENODEV for dev_open, if there
 *				  is no device open function.
 *		Andi Kleen	: Fix error reporting for SIOCGIFCONF
 *		Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	: Cleaned for KMOD
 *		Adam Sulmicki	: Bug Fix : Network Device Unload
 *				  A network device unload needs to purge
 *				  the backlog queue.
 *		Paul Rusty Russell : SIOCSIFNAME
 *		Pekka Riikonen	: Netdev boot-time settings code
 *		Andrew Morton	: Make unregister_netdevice wait
 *				  indefinitely on dev->refcnt
 *		J Hadi Salim	: - Backlog queue sampling
 *				  - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16. Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 * NOTE: That is no longer true with the addition of VLAN tags. Not
 *       sure which should go first, but I bet it won't make much
 *       difference if we are running VLANs. The good news is that
 *       this protocol won't be in the list unless compiled in, so
 *       the average user (w/out VLANs) will not be adversely affected.
 *       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates. This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
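
/*
 * Illustrative sketch (not part of the original file): a pure reader
 * following the locking discipline described above would take
 * dev_base_lock for reading while walking the device list. The helper
 * name count_netdevs is hypothetical:
 *
 *	static int count_netdevs(struct net *net)
 *	{
 *		struct net_device *dev;
 *		int count = 0;
 *
 *		read_lock(&dev_base_lock);
 *		for_each_netdev(net, dev)
 *			count++;
 *		read_unlock(&dev_base_lock);
 *		return count;
 *	}
 */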

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES	(1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 * Add a protocol ID to the list. Now that the input handler is
 * smarter we can dispense with all the messy stuff that used to be
 * here.
 *
 * BEWARE!!! Protocol handlers that mangle input packets MUST BE last
 * in the hash buckets, and checking of protocol handlers MUST start
 * from the promiscuous ptype_all chain in net_bh. It is true now, do
 * not change it. Explanation follows: if a handler that mangles
 * packets were first on the list, it could not sense that the packet
 * is cloned and should be copied-on-write, so it would change it and
 * subsequent readers would get a broken packet.
 * --ANK (980803)
 */

/**
 * dev_add_pack - add packet handler
 * @pt: packet type declaration
 *
 * Add a protocol handler to the networking stack. The passed &packet_type
 * is linked into kernel lists and may not be freed until it has been
 * removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that
 * CPUs that are in the middle of receiving packets will see the
 * new packet type (until the next packet is received).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
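
/*
 * Illustrative sketch (not part of the original file): a module that
 * wants to tap every frame could register a handler like this; the
 * names my_pkt_rcv and my_pt are hypothetical, and error handling is
 * omitted. Note that dev_remove_pack() below sleeps in
 * synchronize_net(), so it must be called from process context.
 *
 *	static int my_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		-- handler owns the skb it gets
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pt = {
 *		.type = htons(ETH_P_ALL),	-- all protocols
 *		.func = my_pkt_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);
 *	...
 *	dev_remove_pack(&my_pt);
 */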

/**
 * __dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all CPUs have gone
 * through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 * dev_remove_pack - remove packet handler
 * @pt: packet type declaration
 *
 * Remove a protocol handler that was previously added to the kernel
 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
 * from the kernel lists and can be freed or reused once this function
 * returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

		Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 * netdev_boot_setup_add - add new setup entry
 * @name: name of the device
 * @map: configured settings for the device
 *
 * Adds new setup entry to the dev_boot_setup list. The function
 * returns 0 on error and 1 on success. This is a generic routine for
 * all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 * netdev_boot_setup_check - check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 * netdev_boot_base - get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings are found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
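
/*
 * Illustrative sketch (an inference from the parsing above, not
 * normative documentation): a boot command line entry of the form
 *
 *	netdev=5,0x300,0,0,eth0
 *
 * would be split by get_options() into irq=5, base_addr=0x300,
 * mem_start=0 and mem_end=0, with the remaining string "eth0"
 * stored as the device name by netdev_boot_setup_add().
 */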

/*******************************************************************************

		Device Interface Subroutines

*******************************************************************************/

/**
 * __dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. Must be called under the RTNL semaphore
 * or @dev_base_lock. If the name is found a pointer to the device
 * is returned. If the name is not found then %NULL is returned. The
 * reference counters are not incremented so the caller must be
 * careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name. This can be called from any
 * context and does its own locking. The returned handle has
 * the usage count incremented and the caller must use dev_put() to
 * release it when it is no longer needed. %NULL is returned if no
 * matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
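
/*
 * Illustrative sketch (not part of the original file): typical use of
 * the reference-counted lookup; "eth0" is just an example name, and
 * the reference must be dropped with dev_put() when done:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */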

/**
 * __dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device has not
 * had its reference counter increased so the caller must be careful
 * about locking. The caller must hold either the RTNL semaphore
 * or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);


/**
 * dev_get_by_index - find a device by its ifindex
 * @net: the applicable net namespace
 * @ifindex: index of device
 *
 * Search for an interface by index. Returns a pointer to the device,
 * or %NULL if it is not found. The device returned has
 * had a reference added and the pointer is safe until the user calls
 * dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 * dev_getbyhwaddr - find a device by its hardware address
 * @net: the applicable net namespace
 * @type: media type of device
 * @ha: hardware address
 *
 * Search for an interface by MAC address. Returns a pointer to the
 * device, or NULL if it is not found. The caller must hold the
 * rtnl semaphore. The returned device has not had its ref count increased
 * and the caller must therefore be careful about locking.
 *
 * BUGS:
 * If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 * dev_get_by_flags - find any device with given flags
 * @net: the applicable net namespace
 * @if_flags: IFF_* values
 * @mask: bitmask of bits in if_flags to check
 *
 * Search for any interface with the given flags. Returns a pointer to
 * the device, or NULL if no matching device is found. The device returned
 * has had a reference added and the pointer is safe until the user calls
 * dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);
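
/*
 * Illustrative sketch (not part of the original file): finding the
 * first interface that is up, by matching IFF_UP in both the value
 * and the mask; as above, the reference must be dropped:
 *
 *	struct net_device *dev = dev_get_by_flags(&init_net, IFF_UP, IFF_UP);
 *	if (dev)
 *		dev_put(dev);
 */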

/**
 * dev_valid_name - check if name is okay for network device
 * @name: name string
 *
 * Network device names need to be valid file names
 * to allow sysfs to work. We also disallow any kind of
 * whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 * __dev_alloc_name - allocate a name for a device
 * @net: network namespace to allocate the device name in
 * @name: name format string
 * @buf: scratch buffer and result name string
 *
 * Passed a format string - eg "lt%d" - it will try and find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user. There must be exactly one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 * dev_alloc_name - allocate a name for a device
 * @dev: device
 * @name: name format string
 *
 * Passed a format string - eg "lt%d" - it will try and find a suitable
 * id. It scans the list of devices to build up a free map, then chooses
 * the first empty slot. The caller must hold the dev_base or rtnl lock
 * while allocating the name and adding the device in order to avoid
 * duplicates.
 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 * Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
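
/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants the standard ethN naming would call, before registering the
 * device and with the rtnl lock held:
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;	-- no free unit, bad format, or OOM
 */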


/**
 * dev_change_name - change name of a device
 * @dev: device
 * @newname: name (or format string) must be at least IFNAMSIZ
 *
 * Change the name of a device; format strings such as "eth%d"
 * can be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	} else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 * dev_set_alias - change ifalias of a device
 * @dev: device
 * @alias: name up to IFALIASZ
 * @len: limit of bytes to copy from info
 *
 * Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 * netdev_features_change - device changes features
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 * netdev_state_change - device changes state
 * @dev: device to cause notification
 *
 * Called to indicate a device has changed state. This function calls
 * the notifier chains for netdev_chain and sends a NEWLINK message
 * to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 * dev_load - load a network module
 * @net: the applicable net namespace
 * @name: name of interface
 *
 * If a network interface is not present and the process has suitable
 * privileges, this function loads the module. If module loading is not
 * available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

/**
 * dev_open - prepare an interface for use.
 * @dev: device to open
 *
 * Takes a device from down to up state. The device's private open
 * function is invoked and then the multicast lists are loaded. Finally
 * the device is moved into the up state and a %NETDEV_UP message is
 * sent to the netdev notifier chain.
 *
 * Calling this function on an active interface is a nop. On a failure
 * a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 * Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 * Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 * Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 * If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 * Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 * Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 * Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 * Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 * ... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
EXPORT_SYMBOL(dev_open);

/**
 * dev_close - shutdown an interface.
 * @dev: device to shutdown
 *
 * This function moves an active device into down state. A
 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 * chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 * Tell people we are going down, so that they can
	 * prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on a different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 * Call the device specific close. This cannot fail.
	 * Only called if the device is UP.
	 *
	 * We allow it to be called even after a DETACH hot-plug
	 * event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 * Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 * Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 * dev_disable_lro - disable Large Receive Offload on a device
 * @dev: device
 *
 * Disable Large Receive Offload (LRO) on a net device. Must be
 * called under RTNL. This is needed if received packets may be
 * forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 * Device change register/unregister. These are not inline or static
 * as we export them to the world.
 */

/**
 * register_netdevice_notifier - register a network notifier block
 * @nb: notifier
 *
 * Register a notifier to be called when network device events occur.
 * The notifier passed is linked into the kernel structures and must
 * not be reused until it has been unregistered. A negative errno code
 * is returned on a failure.
 *
 * When registered, all registration and up events are replayed
 * to the new notifier to allow it to have a race-free
 * view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
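
/*
 * Illustrative sketch (not part of the original file): a minimal
 * notifier that logs NETDEV_UP events. The names my_netdev_event and
 * my_nb are hypothetical; in this kernel generation the void pointer
 * passed to the callback is the struct net_device itself:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */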
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296
1297/**
1298 * unregister_netdevice_notifier - unregister a network notifier block
1299 * @nb: notifier
1300 *
1301 * Unregister a notifier previously registered by
1302 * register_netdevice_notifier(). The notifier is unlinked into the
1303 * kernel structures and may then be reused. A negative errno code
1304 * is returned on a failure.
1305 */
1306
1307int unregister_netdevice_notifier(struct notifier_block *nb)
1308{
Herbert Xu9f514952006-03-25 01:24:25 -08001309 int err;
1310
1311 rtnl_lock();
Alan Sternf07d5b92006-05-09 15:23:03 -07001312 err = raw_notifier_chain_unregister(&netdev_chain, nb);
Herbert Xu9f514952006-03-25 01:24:25 -08001313 rtnl_unlock();
1314 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001316EXPORT_SYMBOL(unregister_netdevice_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317
1318/**
1319 * call_netdevice_notifiers - call all network notifier blocks
1320 * @val: value passed unmodified to notifier function
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07001321 * @dev: net_device pointer passed unmodified to notifier function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 *
1323 * Call all network notifier blocks. Parameters and return value
Alan Sternf07d5b92006-05-09 15:23:03 -07001324 * are as for raw_notifier_call_chain().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 */
1326
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001327int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328{
Eric W. Biedermanad7379d2007-09-16 15:33:32 -07001329 return raw_notifier_call_chain(&netdev_chain, val, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330}
1331
1332/* When > 0 there are consumers of rx skb time stamps */
1333static atomic_t netstamp_needed = ATOMIC_INIT(0);
1334
1335void net_enable_timestamp(void)
1336{
1337 atomic_inc(&netstamp_needed);
1338}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001339EXPORT_SYMBOL(net_enable_timestamp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340
1341void net_disable_timestamp(void)
1342{
1343 atomic_dec(&netstamp_needed);
1344}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001345EXPORT_SYMBOL(net_disable_timestamp);
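/*
 * Illustrative sketch (an assumption, not taken from this file): a
 * consumer of rx timestamps, such as a packet socket, brackets its
 * lifetime with the pair above so that net_timestamp() below really
 * stamps received skbs:
 *
 *	net_enable_timestamp();
 *	... receive packets and read skb->tstamp ...
 *	net_disable_timestamp();
 */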
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001347static inline void net_timestamp(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348{
1349 if (atomic_read(&netstamp_needed))
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001350 __net_timestamp(skb);
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07001351 else
1352 skb->tstamp.tv64 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353}
1354
1355/*
1356 * Support routine. Sends outgoing frames to any network
1357 * taps currently in use.
1358 */
1359
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001360static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361{
1362 struct packet_type *ptype;
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001363
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001364#ifdef CONFIG_NET_CLS_ACT
1365 if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
1366 net_timestamp(skb);
1367#else
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001368 net_timestamp(skb);
Jarek Poplawski8caf1532009-04-17 10:08:49 +00001369#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370
1371 rcu_read_lock();
1372 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1373 /* Never send packets back to the socket
1374 * they originated from - MvS (miquels@drinkel.ow.org)
1375 */
1376 if ((ptype->dev == dev || !ptype->dev) &&
1377 (ptype->af_packet_priv == NULL ||
1378 (struct sock *)ptype->af_packet_priv != skb->sk)) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001379 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 if (!skb2)
1381 break;
1382
1383 /* The network header should be correctly
1384 set by the sender, so the check below is
1385 just protection against buggy protocols.
1386 */
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001387 skb_reset_mac_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001389 if (skb_network_header(skb2) < skb2->data ||
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001390 skb2->network_header > skb2->tail) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 if (net_ratelimit())
1392 printk(KERN_CRIT "protocol %04x is "
1393 "buggy, dev %s\n",
1394 skb2->protocol, dev->name);
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07001395 skb_reset_network_header(skb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 }
1397
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001398 skb2->transport_header = skb2->network_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 skb2->pkt_type = PACKET_OUTGOING;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07001400 ptype->func(skb2, skb->dev, ptype, skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 }
1402 }
1403 rcu_read_unlock();
1404}
1405
Denis Vlasenko56079432006-03-29 15:57:29 -08001406
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001407static inline void __netif_reschedule(struct Qdisc *q)
1408{
1409 struct softnet_data *sd;
1410 unsigned long flags;
1411
1412 local_irq_save(flags);
1413 sd = &__get_cpu_var(softnet_data);
1414 q->next_sched = sd->output_queue;
1415 sd->output_queue = q;
1416 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1417 local_irq_restore(flags);
1418}
1419
David S. Miller37437bb2008-07-16 02:15:04 -07001420void __netif_schedule(struct Qdisc *q)
Denis Vlasenko56079432006-03-29 15:57:29 -08001421{
Jarek Poplawskidef82a12008-08-17 21:54:43 -07001422 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1423 __netif_reschedule(q);
Denis Vlasenko56079432006-03-29 15:57:29 -08001424}
1425EXPORT_SYMBOL(__netif_schedule);
1426
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001427void dev_kfree_skb_irq(struct sk_buff *skb)
Denis Vlasenko56079432006-03-29 15:57:29 -08001428{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001429 if (atomic_dec_and_test(&skb->users)) {
1430 struct softnet_data *sd;
1431 unsigned long flags;
Denis Vlasenko56079432006-03-29 15:57:29 -08001432
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001433 local_irq_save(flags);
1434 sd = &__get_cpu_var(softnet_data);
1435 skb->next = sd->completion_queue;
1436 sd->completion_queue = skb;
1437 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1438 local_irq_restore(flags);
1439 }
Denis Vlasenko56079432006-03-29 15:57:29 -08001440}
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001441EXPORT_SYMBOL(dev_kfree_skb_irq);
Denis Vlasenko56079432006-03-29 15:57:29 -08001442
1443void dev_kfree_skb_any(struct sk_buff *skb)
1444{
1445 if (in_irq() || irqs_disabled())
1446 dev_kfree_skb_irq(skb);
1447 else
1448 dev_kfree_skb(skb);
1449}
1450EXPORT_SYMBOL(dev_kfree_skb_any);
1451
1452
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001453/**
1454 * netif_device_detach - mark device as removed
1455 * @dev: network device
1456 *
1457 * Mark the device as removed from the system and therefore no longer available.
1458 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001459void netif_device_detach(struct net_device *dev)
1460{
1461 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1462 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001463 netif_tx_stop_all_queues(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001464 }
1465}
1466EXPORT_SYMBOL(netif_device_detach);
1467
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001468/**
1469 * netif_device_attach - mark device as attached
1470 * @dev: network device
1471 *
1472 * Mark the device as attached to the system and restart it if needed.
1473 */
Denis Vlasenko56079432006-03-29 15:57:29 -08001474void netif_device_attach(struct net_device *dev)
1475{
1476 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1477 netif_running(dev)) {
Alexander Duyckd5431032009-04-08 13:15:22 +00001478 netif_tx_wake_all_queues(dev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001479 __netdev_watchdog_up(dev);
Denis Vlasenko56079432006-03-29 15:57:29 -08001480 }
1481}
1482EXPORT_SYMBOL(netif_device_attach);
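/*
 * Illustrative sketch, not part of the original file: a PCI driver
 * commonly pairs the detach/attach helpers above in its power
 * management callbacks. Names are assumptions; the block is guarded by
 * "#if 0" so it is never compiled.
 */
#if 0
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stop all tx queues */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_attach(dev);	/* wake queues, restart watchdog */
	return 0;
}
#endif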
1483
Ben Hutchings6de329e2008-06-16 17:02:28 -07001484static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1485{
1486 return ((features & NETIF_F_GEN_CSUM) ||
1487 ((features & NETIF_F_IP_CSUM) &&
1488 protocol == htons(ETH_P_IP)) ||
1489 ((features & NETIF_F_IPV6_CSUM) &&
Yi Zou1c8dbcf2009-02-27 14:06:54 -08001490 protocol == htons(ETH_P_IPV6)) ||
1491 ((features & NETIF_F_FCOE_CRC) &&
1492 protocol == htons(ETH_P_FCOE)));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001493}
1494
1495static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1496{
1497 if (can_checksum_protocol(dev->features, skb->protocol))
1498 return true;
1499
1500 if (skb->protocol == htons(ETH_P_8021Q)) {
1501 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1502 if (can_checksum_protocol(dev->features & dev->vlan_features,
1503 veh->h_vlan_encapsulated_proto))
1504 return true;
1505 }
1506
1507 return false;
1508}
Denis Vlasenko56079432006-03-29 15:57:29 -08001509
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510/*
1511 * Invalidate hardware checksum when packet is to be mangled, and
1512 * complete checksum manually on outgoing path.
1513 */
Patrick McHardy84fa7932006-08-29 16:44:56 -07001514int skb_checksum_help(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515{
Al Virod3bc23e2006-11-14 21:24:49 -08001516 __wsum csum;
Herbert Xu663ead32007-04-09 11:59:07 -07001517 int ret = 0, offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518
Patrick McHardy84fa7932006-08-29 16:44:56 -07001519 if (skb->ip_summed == CHECKSUM_COMPLETE)
Herbert Xua430a432006-07-08 13:34:56 -07001520 goto out_set_summed;
1521
1522 if (unlikely(skb_shinfo(skb)->gso_size)) {
Herbert Xua430a432006-07-08 13:34:56 -07001523 /* Let GSO fix up the checksum. */
1524 goto out_set_summed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 }
1526
Herbert Xua0308472007-10-15 01:47:15 -07001527 offset = skb->csum_start - skb_headroom(skb);
1528 BUG_ON(offset >= skb_headlen(skb));
1529 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1530
1531 offset += skb->csum_offset;
1532 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1533
1534 if (skb_cloned(skb) &&
1535 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1537 if (ret)
1538 goto out;
1539 }
1540
Herbert Xua0308472007-10-15 01:47:15 -07001541 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
Herbert Xua430a432006-07-08 13:34:56 -07001542out_set_summed:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 skb->ip_summed = CHECKSUM_NONE;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001544out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 return ret;
1546}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001547EXPORT_SYMBOL(skb_checksum_help);
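/*
 * Illustrative sketch (an assumption, not taken from this file): a path
 * that mangles a CHECKSUM_PARTIAL skb bound for a device without
 * checksum offload resolves the checksum in software first:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
 *		goto drop;
 */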
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001549/**
1550 * skb_gso_segment - Perform segmentation on skb.
1551 * @skb: buffer to segment
Herbert Xu576a30e2006-06-27 13:22:38 -07001552 * @features: features for the output path (see dev->features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001553 *
1554 * This function segments the given skb and returns a list of segments.
Herbert Xu576a30e2006-06-27 13:22:38 -07001555 *
1556 * It may return NULL if the skb requires no segmentation. This is
1557 * only possible when GSO is used for verifying header integrity.
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001558 */
Herbert Xu576a30e2006-06-27 13:22:38 -07001559struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001560{
1561 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1562 struct packet_type *ptype;
Al Viro252e3342006-11-14 20:48:11 -08001563 __be16 type = skb->protocol;
Herbert Xua430a432006-07-08 13:34:56 -07001564 int err;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001565
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001566 skb_reset_mac_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07001567 skb->mac_len = skb->network_header - skb->mac_header;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001568 __skb_pull(skb, skb->mac_len);
1569
Herbert Xu67fd1a72009-01-19 16:26:44 -08001570 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1571 struct net_device *dev = skb->dev;
1572 struct ethtool_drvinfo info = {};
1573
1574 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1575 dev->ethtool_ops->get_drvinfo(dev, &info);
1576
1577 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1578 "ip_summed=%d",
1579 info.driver, dev ? dev->features : 0L,
1580 skb->sk ? skb->sk->sk_route_caps : 0L,
1581 skb->len, skb->data_len, skb->ip_summed);
1582
Herbert Xua430a432006-07-08 13:34:56 -07001583 if (skb_header_cloned(skb) &&
1584 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1585 return ERR_PTR(err);
1586 }
1587
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001588 rcu_read_lock();
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08001589 list_for_each_entry_rcu(ptype,
1590 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001591 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
Patrick McHardy84fa7932006-08-29 16:44:56 -07001592 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
Herbert Xua430a432006-07-08 13:34:56 -07001593 err = ptype->gso_send_check(skb);
1594 segs = ERR_PTR(err);
1595 if (err || skb_gso_ok(skb, features))
1596 break;
Arnaldo Carvalho de Melod56f90a2007-04-10 20:50:43 -07001597 __skb_push(skb, (skb->data -
1598 skb_network_header(skb)));
Herbert Xua430a432006-07-08 13:34:56 -07001599 }
Herbert Xu576a30e2006-06-27 13:22:38 -07001600 segs = ptype->gso_segment(skb, features);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001601 break;
1602 }
1603 }
1604 rcu_read_unlock();
1605
Arnaldo Carvalho de Melo98e399f2007-03-19 15:33:04 -07001606 __skb_push(skb, skb->data - skb_mac_header(skb));
Herbert Xu576a30e2006-06-27 13:22:38 -07001607
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001608 return segs;
1609}
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001610EXPORT_SYMBOL(skb_gso_segment);
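/*
 * Illustrative sketch (an assumption, not taken from this file): a
 * caller walks the returned segment list and must be prepared for an
 * ERR_PTR:
 *
 *	struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *	if (IS_ERR(segs))
 *		goto drop;
 *	while (segs) {
 *		struct sk_buff *nskb = segs;
 *
 *		segs = segs->next;
 *		nskb->next = NULL;
 *		... transmit nskb ...
 *	}
 */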
1611
Herbert Xufb286bb2005-11-10 13:01:24 -08001612/* Take action when hardware reception checksum errors are detected. */
1613#ifdef CONFIG_BUG
1614void netdev_rx_csum_fault(struct net_device *dev)
1615{
1616 if (net_ratelimit()) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001617 printk(KERN_ERR "%s: hw csum failure.\n",
Stephen Hemminger246a4212005-12-08 15:21:39 -08001618 dev ? dev->name : "<unknown>");
Herbert Xufb286bb2005-11-10 13:01:24 -08001619 dump_stack();
1620 }
1621}
1622EXPORT_SYMBOL(netdev_rx_csum_fault);
1623#endif
1624
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625/* Actually, we should eliminate this check as soon as we know that:
1626 * 1. An IOMMU is present and can map all of the memory.
1627 * 2. No high memory really exists on this machine.
1628 */
1629
1630static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1631{
Herbert Xu3d3a8532006-06-27 13:33:10 -07001632#ifdef CONFIG_HIGHMEM
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 int i;
1634
1635 if (dev->features & NETIF_F_HIGHDMA)
1636 return 0;
1637
1638 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1639 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1640 return 1;
1641
Herbert Xu3d3a8532006-06-27 13:33:10 -07001642#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 return 0;
1644}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001646struct dev_gso_cb {
1647 void (*destructor)(struct sk_buff *skb);
1648};
1649
1650#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1651
1652static void dev_gso_skb_destructor(struct sk_buff *skb)
1653{
1654 struct dev_gso_cb *cb;
1655
1656 do {
1657 struct sk_buff *nskb = skb->next;
1658
1659 skb->next = nskb->next;
1660 nskb->next = NULL;
1661 kfree_skb(nskb);
1662 } while (skb->next);
1663
1664 cb = DEV_GSO_CB(skb);
1665 if (cb->destructor)
1666 cb->destructor(skb);
1667}
1668
1669/**
1670 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1671 * @skb: buffer to segment
1672 *
1673 * This function segments the given skb and stores the list of segments
1674 * in skb->next.
1675 */
1676static int dev_gso_segment(struct sk_buff *skb)
1677{
1678 struct net_device *dev = skb->dev;
1679 struct sk_buff *segs;
Herbert Xu576a30e2006-06-27 13:22:38 -07001680 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1681 NETIF_F_SG : 0);
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001682
Herbert Xu576a30e2006-06-27 13:22:38 -07001683 segs = skb_gso_segment(skb, features);
1684
1685 /* Verifying header integrity only. */
1686 if (!segs)
1687 return 0;
1688
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001689 if (IS_ERR(segs))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001690 return PTR_ERR(segs);
1691
1692 skb->next = segs;
1693 DEV_GSO_CB(skb)->destructor = skb->destructor;
1694 skb->destructor = dev_gso_skb_destructor;
1695
1696 return 0;
1697}
1698
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001699int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1700 struct netdev_queue *txq)
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001701{
Stephen Hemminger00829822008-11-20 20:14:53 -08001702 const struct net_device_ops *ops = dev->netdev_ops;
Patrick Ohlyac45f602009-02-12 05:03:37 +00001703 int rc;
Stephen Hemminger00829822008-11-20 20:14:53 -08001704
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001705 if (likely(!skb->next)) {
Stephen Hemminger9be9a6b2007-04-20 17:02:45 -07001706 if (!list_empty(&ptype_all))
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001707 dev_queue_xmit_nit(skb, dev);
1708
Herbert Xu576a30e2006-06-27 13:22:38 -07001709 if (netif_needs_gso(dev, skb)) {
1710 if (unlikely(dev_gso_segment(skb)))
1711 goto out_kfree_skb;
1712 if (skb->next)
1713 goto gso;
1714 }
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001715
Eric Dumazet93f154b2009-05-18 22:19:19 -07001716 /*
1717 * If the device doesn't need skb->dst, release it right now while
1718 * it's hot in this CPU's cache.
1719 */
Eric Dumazetadf30902009-06-02 05:19:30 +00001720 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1721 skb_dst_drop(skb);
1722
Patrick Ohlyac45f602009-02-12 05:03:37 +00001723 rc = ops->ndo_start_xmit(skb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001724 if (rc == NETDEV_TX_OK)
Eric Dumazet08baf562009-05-25 22:58:01 -07001725 txq_trans_update(txq);
Patrick Ohlyac45f602009-02-12 05:03:37 +00001726 /*
1727 * TODO: if skb_orphan() was called by
1728 * dev->hard_start_xmit() (for example, the unmodified
1729 * igb driver does that; bnx2 doesn't), then
1730 * skb_tx_software_timestamp() will be unable to send
1731 * back the time stamp.
1732 *
1733 * How can this be prevented? Always create another
1734 * reference to the socket before calling
1735 * dev->hard_start_xmit()? Prevent that skb_orphan()
1736 * does anything in dev->hard_start_xmit() by clearing
1737 * the skb destructor before the call and restoring it
1738 * afterwards, then doing the skb_orphan() ourselves?
1739 */
Patrick Ohlyac45f602009-02-12 05:03:37 +00001740 return rc;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001741 }
1742
Herbert Xu576a30e2006-06-27 13:22:38 -07001743gso:
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001744 do {
1745 struct sk_buff *nskb = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001746
1747 skb->next = nskb->next;
1748 nskb->next = NULL;
Stephen Hemminger00829822008-11-20 20:14:53 -08001749 rc = ops->ndo_start_xmit(nskb, dev);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001750 if (unlikely(rc != NETDEV_TX_OK)) {
Michael Chanf54d9e82006-06-25 23:57:04 -07001751 nskb->next = skb->next;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001752 skb->next = nskb;
1753 return rc;
1754 }
Eric Dumazet08baf562009-05-25 22:58:01 -07001755 txq_trans_update(txq);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001756 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
Michael Chanf54d9e82006-06-25 23:57:04 -07001757 return NETDEV_TX_BUSY;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001758 } while (skb->next);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001759
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001760 skb->destructor = DEV_GSO_CB(skb)->destructor;
1761
1762out_kfree_skb:
1763 kfree_skb(skb);
Patrick McHardyec634fe2009-07-05 19:23:38 -07001764 return NETDEV_TX_OK;
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001765}
1766
David S. Miller70192982009-01-27 16:34:47 -08001767static u32 skb_tx_hashrnd;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001768
Stephen Hemminger92477442009-03-21 13:39:26 -07001769u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
David S. Miller8f0f2222008-07-15 03:47:03 -07001770{
David S. Miller70192982009-01-27 16:34:47 -08001771 u32 hash;
David S. Millerb6b2fed2008-07-21 09:48:06 -07001772
David S. Miller513de112009-05-03 14:43:10 -07001773 if (skb_rx_queue_recorded(skb)) {
1774 hash = skb_get_rx_queue(skb);
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001775 while (unlikely(hash >= dev->real_num_tx_queues))
David S. Miller513de112009-05-03 14:43:10 -07001776 hash -= dev->real_num_tx_queues;
1777 return hash;
1778 }
Eric Dumazetec581f62009-05-01 09:05:06 -07001779
1780 if (skb->sk && skb->sk->sk_hash)
David S. Miller70192982009-01-27 16:34:47 -08001781 hash = skb->sk->sk_hash;
Eric Dumazetec581f62009-05-01 09:05:06 -07001782 else
David S. Miller70192982009-01-27 16:34:47 -08001783 hash = skb->protocol;
David S. Millerd5a9e242009-01-27 16:22:11 -08001784
David S. Miller70192982009-01-27 16:34:47 -08001785 hash = jhash_1word(hash, skb_tx_hashrnd);
David S. Millerd5a9e242009-01-27 16:22:11 -08001786
David S. Millerb6b2fed2008-07-21 09:48:06 -07001787 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
David S. Miller8f0f2222008-07-15 03:47:03 -07001788}
Stephen Hemminger92477442009-03-21 13:39:26 -07001789EXPORT_SYMBOL(skb_tx_hash);
David S. Miller8f0f2222008-07-15 03:47:03 -07001790
David S. Millere8a04642008-07-17 00:34:19 -07001791static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1792 struct sk_buff *skb)
1793{
Stephen Hemminger00829822008-11-20 20:14:53 -08001794 const struct net_device_ops *ops = dev->netdev_ops;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001795 u16 queue_index = 0;
1796
Stephen Hemminger00829822008-11-20 20:14:53 -08001797 if (ops->ndo_select_queue)
1798 queue_index = ops->ndo_select_queue(dev, skb);
David S. Miller8f0f2222008-07-15 03:47:03 -07001799 else if (dev->real_num_tx_queues > 1)
David S. Miller70192982009-01-27 16:34:47 -08001800 queue_index = skb_tx_hash(dev, skb);
David S. Millereae792b2008-07-15 03:03:33 -07001801
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001802 skb_set_queue_mapping(skb, queue_index);
1803 return netdev_get_tx_queue(dev, queue_index);
David S. Millere8a04642008-07-17 00:34:19 -07001804}
1805
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001806static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
1807 struct net_device *dev,
1808 struct netdev_queue *txq)
1809{
1810 spinlock_t *root_lock = qdisc_lock(q);
1811 int rc;
1812
1813 spin_lock(root_lock);
1814 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1815 kfree_skb(skb);
1816 rc = NET_XMIT_DROP;
1817 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
1818 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
1819 /*
1820 * This is a work-conserving queue; there are no old skbs
1821 * waiting to be sent out; and the qdisc is not running -
1822 * xmit the skb directly.
1823 */
1824 __qdisc_update_bstats(q, skb->len);
1825 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
1826 __qdisc_run(q);
1827 else
1828 clear_bit(__QDISC_STATE_RUNNING, &q->state);
1829
1830 rc = NET_XMIT_SUCCESS;
1831 } else {
1832 rc = qdisc_enqueue_root(skb, q);
1833 qdisc_run(q);
1834 }
1835 spin_unlock(root_lock);
1836
1837 return rc;
1838}
1839
Dave Jonesd29f7492008-07-22 14:09:06 -07001840/**
1841 * dev_queue_xmit - transmit a buffer
1842 * @skb: buffer to transmit
1843 *
1844 * Queue a buffer for transmission to a network device. The caller must
1845 * have set the device and priority and built the buffer before calling
1846 * this function. The function can be called from an interrupt.
1847 *
1848 * A negative errno code is returned on a failure. A success does not
1849 * guarantee the frame will be transmitted as it may be dropped due
1850 * to congestion or traffic shaping.
1851 *
1852 * -----------------------------------------------------------------------------------
1853 * I notice this method can also return errors from the queue disciplines,
1854 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1855 * be positive.
1856 *
1857 * Regardless of the return value, the skb is consumed, so it is currently
1858 * difficult to retry a send to this method. (You can bump the ref count
1859 * before sending to hold a reference for retry if you are careful.)
1860 *
1861 * When calling this method, interrupts MUST be enabled. This is because
1862 * the BH enable code must have IRQs enabled so that it will not deadlock.
1863 * --BLG
1864 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865int dev_queue_xmit(struct sk_buff *skb)
1866{
1867 struct net_device *dev = skb->dev;
David S. Millerdc2b4842008-07-08 17:18:23 -07001868 struct netdev_queue *txq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 struct Qdisc *q;
1870 int rc = -ENOMEM;
1871
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001872 /* GSO will handle the following emulations directly. */
1873 if (netif_needs_gso(dev, skb))
1874 goto gso;
1875
David S. Miller4cf704f2009-06-09 00:18:51 -07001876 if (skb_has_frags(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 !(dev->features & NETIF_F_FRAGLIST) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001878 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 goto out_kfree_skb;
1880
1881 /* A fragmented skb is linearized if the device does not support SG,
1882 * or if at least one of the fragments is in highmem and the device
1883 * does not support DMA from it.
1884 */
1885 if (skb_shinfo(skb)->nr_frags &&
1886 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
Herbert Xu364c6ba2006-06-09 16:10:40 -07001887 __skb_linearize(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 goto out_kfree_skb;
1889
1890 /* If packet is not checksummed and device does not support
1891 * checksumming for this protocol, complete checksumming here.
1892 */
Herbert Xu663ead32007-04-09 11:59:07 -07001893 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1894 skb_set_transport_header(skb, skb->csum_start -
1895 skb_headroom(skb));
Ben Hutchings6de329e2008-06-16 17:02:28 -07001896 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1897 goto out_kfree_skb;
Herbert Xu663ead32007-04-09 11:59:07 -07001898 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899
Herbert Xuf6a78bf2006-06-22 02:57:17 -07001900gso:
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001901 /* Disable soft irqs for various locks below. Also
1902 * stops preemption for RCU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09001904 rcu_read_lock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905
David S. Millereae792b2008-07-15 03:03:33 -07001906 txq = dev_pick_tx(dev, skb);
David S. Millerb0e1e642008-07-08 17:42:10 -07001907 q = rcu_dereference(txq->qdisc);
David S. Miller37437bb2008-07-16 02:15:04 -07001908
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909#ifdef CONFIG_NET_CLS_ACT
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001910 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911#endif
1912 if (q->enqueue) {
Krishna Kumarbbd8a0d2009-08-06 01:44:21 +00001913 rc = __dev_xmit_skb(skb, q, dev, txq);
David S. Miller37437bb2008-07-16 02:15:04 -07001914 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 }
1916
1917 /* The device has no queue. Common case for software devices:
1918 loopback, all sorts of tunnels...
1919
Herbert Xu932ff272006-06-09 12:20:56 -07001920 Really, it is unlikely that netif_tx_lock protection is necessary
1921 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 counters.)
1923 However, it is possible that they rely on the protection
1924 provided by us here.
1925
1926 Check this and take the lock. It is not prone to deadlocks.
1927 Either way, the noqueue qdisc case is even simpler 8)
1928 */
1929 if (dev->flags & IFF_UP) {
1930 int cpu = smp_processor_id(); /* ok because BHs are off */
1931
David S. Millerc773e842008-07-08 23:13:53 -07001932 if (txq->xmit_lock_owner != cpu) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933
David S. Millerc773e842008-07-08 23:13:53 -07001934 HARD_TX_LOCK(dev, txq, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001936 if (!netif_tx_queue_stopped(txq)) {
Krishna Kumar03a9a442009-08-29 20:21:36 +00001937 rc = NET_XMIT_SUCCESS;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07001938 if (!dev_hard_start_xmit(skb, dev, txq)) {
David S. Millerc773e842008-07-08 23:13:53 -07001939 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 goto out;
1941 }
1942 }
David S. Millerc773e842008-07-08 23:13:53 -07001943 HARD_TX_UNLOCK(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 if (net_ratelimit())
1945 printk(KERN_CRIT "Virtual device %s asks to "
1946 "queue packet!\n", dev->name);
1947 } else {
1948 /* Recursion is detected! It is possible,
1949 * unfortunately */
1950 if (net_ratelimit())
1951 printk(KERN_CRIT "Dead loop on virtual device "
1952 "%s, fix it urgently!\n", dev->name);
1953 }
1954 }
1955
1956 rc = -ENETDOWN;
Herbert Xud4828d82006-06-22 02:28:18 -07001957 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958
1959out_kfree_skb:
1960 kfree_skb(skb);
1961 return rc;
1962out:
Herbert Xud4828d82006-06-22 02:28:18 -07001963 rcu_read_unlock_bh();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 return rc;
1965}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07001966EXPORT_SYMBOL(dev_queue_xmit);
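/*
 * Illustrative sketch (an assumption, not taken from this file): a
 * software device hands a fully built frame to the stack like this;
 * note the skb is consumed whatever the return value is:
 *
 *	skb->dev = dev;
 *	skb->priority = sk ? sk->sk_priority : 0;
 *	rc = dev_queue_xmit(skb);
 *	(rc may be 0, a positive NET_XMIT_* code, or a negative errno)
 */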
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
1968
1969/*=======================================================================
1970 Receiver routines
1971 =======================================================================*/
1972
Stephen Hemminger6b2bedc2007-03-12 14:33:50 -07001973int netdev_max_backlog __read_mostly = 1000;
1974int netdev_budget __read_mostly = 300;
1975int weight_p __read_mostly = 64; /* old backlog weight */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
1977DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1978
1979
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980/**
1981 * netif_rx - post buffer to the network code
1982 * @skb: buffer to post
1983 *
1984 * This function receives a packet from a device driver and queues it for
1985 * the upper (protocol) levels to process. It always succeeds. The buffer
1986 * may be dropped during processing for congestion control or by the
1987 * protocol layers.
1988 *
1989 * Return values:
1990 * NET_RX_SUCCESS (no congestion)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 * NET_RX_DROP (packet was dropped)
1992 *
1993 */
1994
1995int netif_rx(struct sk_buff *skb)
1996{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 struct softnet_data *queue;
1998 unsigned long flags;
1999
2000 /* if netpoll wants it, pretend we never saw it */
2001 if (netpoll_rx(skb))
2002 return NET_RX_DROP;
2003
Eric Dumazetb7aa0bf2007-04-19 16:16:32 -07002004 if (!skb->tstamp.tv64)
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002005 net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006
2007 /*
2008 * The code is arranged so that the path is shortest
2009 * when the CPU is congested but still operating.
2010 */
2011 local_irq_save(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 queue = &__get_cpu_var(softnet_data);
2013
2014 __get_cpu_var(netdev_rx_stat).total++;
2015 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2016 if (queue->input_pkt_queue.qlen) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017enqueue:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 __skb_queue_tail(&queue->input_pkt_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 local_irq_restore(flags);
Stephen Hemminger34008d82005-06-23 20:10:00 -07002020 return NET_RX_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 }
2022
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002023 napi_schedule(&queue->backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024 goto enqueue;
2025 }
2026
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 __get_cpu_var(netdev_rx_stat).dropped++;
2028 local_irq_restore(flags);
2029
2030 kfree_skb(skb);
2031 return NET_RX_DROP;
2032}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002033EXPORT_SYMBOL(netif_rx);
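/*
 * Illustrative sketch (an assumption, not taken from this file): a
 * non-NAPI driver's receive interrupt feeds each frame into the
 * backlog queue:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);	(the return value is usually ignored)
 */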
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034
2035int netif_rx_ni(struct sk_buff *skb)
2036{
2037 int err;
2038
2039 preempt_disable();
2040 err = netif_rx(skb);
2041 if (local_softirq_pending())
2042 do_softirq();
2043 preempt_enable();
2044
2045 return err;
2046}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047EXPORT_SYMBOL(netif_rx_ni);
2048
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049static void net_tx_action(struct softirq_action *h)
2050{
2051 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2052
2053 if (sd->completion_queue) {
2054 struct sk_buff *clist;
2055
2056 local_irq_disable();
2057 clist = sd->completion_queue;
2058 sd->completion_queue = NULL;
2059 local_irq_enable();
2060
2061 while (clist) {
2062 struct sk_buff *skb = clist;
2063 clist = clist->next;
2064
Ilpo Järvinen547b7922008-07-25 21:43:18 -07002065 WARN_ON(atomic_read(&skb->users));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 __kfree_skb(skb);
2067 }
2068 }
2069
2070 if (sd->output_queue) {
David S. Miller37437bb2008-07-16 02:15:04 -07002071 struct Qdisc *head;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072
2073 local_irq_disable();
2074 head = sd->output_queue;
2075 sd->output_queue = NULL;
2076 local_irq_enable();
2077
2078 while (head) {
David S. Miller37437bb2008-07-16 02:15:04 -07002079 struct Qdisc *q = head;
2080 spinlock_t *root_lock;
2081
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 head = head->next_sched;
2083
David S. Miller5fb66222008-08-02 20:02:43 -07002084 root_lock = qdisc_lock(q);
David S. Miller37437bb2008-07-16 02:15:04 -07002085 if (spin_trylock(root_lock)) {
Jarek Poplawskidef82a12008-08-17 21:54:43 -07002086 smp_mb__before_clear_bit();
2087 clear_bit(__QDISC_STATE_SCHED,
2088 &q->state);
David S. Miller37437bb2008-07-16 02:15:04 -07002089 qdisc_run(q);
2090 spin_unlock(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 } else {
David S. Miller195648b2008-08-19 04:00:36 -07002092 if (!test_bit(__QDISC_STATE_DEACTIVATED,
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002093 &q->state)) {
David S. Miller195648b2008-08-19 04:00:36 -07002094 __netif_reschedule(q);
Jarek Poplawskie8a83e12008-09-07 18:41:21 -07002095 } else {
2096 smp_mb__before_clear_bit();
2097 clear_bit(__QDISC_STATE_SCHED,
2098 &q->state);
2099 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 }
2101 }
2102 }
2103}
2104
Stephen Hemminger6f05f622007-03-08 20:46:03 -08002105static inline int deliver_skb(struct sk_buff *skb,
2106 struct packet_type *pt_prev,
2107 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108{
2109 atomic_inc(&skb->users);
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002110 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111}
2112
2113#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
Michał Mirosławda678292009-06-05 05:35:28 +00002114
2115#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2116/* This hook is defined here for ATM LANE */
2117int (*br_fdb_test_addr_hook)(struct net_device *dev,
2118 unsigned char *addr) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002119EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002120#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121
Stephen Hemminger6229e362007-03-21 13:38:47 -07002122/*
2123 * If the bridge module is loaded, call the bridging hook.
2124 * Returns NULL if the packet was consumed.
2125 */
2126struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2127 struct sk_buff *skb) __read_mostly;
Stephen Hemminger4fb019a2009-09-11 11:50:08 -07002128EXPORT_SYMBOL_GPL(br_handle_frame_hook);
Michał Mirosławda678292009-06-05 05:35:28 +00002129
Stephen Hemminger6229e362007-03-21 13:38:47 -07002130static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2131 struct packet_type **pt_prev, int *ret,
2132 struct net_device *orig_dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133{
2134 struct net_bridge_port *port;
2135
Stephen Hemminger6229e362007-03-21 13:38:47 -07002136 if (skb->pkt_type == PACKET_LOOPBACK ||
2137 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2138 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139
2140 if (*pt_prev) {
Stephen Hemminger6229e362007-03-21 13:38:47 -07002141 *ret = deliver_skb(skb, *pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 *pt_prev = NULL;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002143 }
2144
Stephen Hemminger6229e362007-03-21 13:38:47 -07002145 return br_handle_frame_hook(port, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146}
2147#else
Stephen Hemminger6229e362007-03-21 13:38:47 -07002148#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149#endif
2150
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002151#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2152struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2153EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2154
2155static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2156 struct packet_type **pt_prev,
2157 int *ret,
2158 struct net_device *orig_dev)
2159{
2160 if (skb->dev->macvlan_port == NULL)
2161 return skb;
2162
2163 if (*pt_prev) {
2164 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2165 *pt_prev = NULL;
2166 }
2167 return macvlan_handle_frame_hook(skb);
2168}
2169#else
2170#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2171#endif
2172
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173#ifdef CONFIG_NET_CLS_ACT
2174/* TODO: Maybe we should just force sch_ingress to be compiled in
2175 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
2176 * instructions (a compare and 2 extra stores) right now when it is
2177 * off but CONFIG_NET_CLS_ACT is on.
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002178 * NOTE: This doesn't disable any functionality; if you don't have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 * the ingress scheduler, you just can't add policies on ingress.
2180 *
2181 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002182static int ing_filter(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 struct net_device *dev = skb->dev;
Herbert Xuf697c3e2007-10-14 00:38:47 -07002185 u32 ttl = G_TC_RTTL(skb->tc_verd);
David S. Miller555353c2008-07-08 17:33:13 -07002186 struct netdev_queue *rxq;
2187 int result = TC_ACT_OK;
2188 struct Qdisc *q;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002189
Herbert Xuf697c3e2007-10-14 00:38:47 -07002190 if (MAX_RED_LOOP < ttl++) {
2191 printk(KERN_WARNING
2192 "Redir loop detected, dropping packet (%d->%d)\n",
2193 skb->iif, dev->ifindex);
2194 return TC_ACT_SHOT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 }
2196
Herbert Xuf697c3e2007-10-14 00:38:47 -07002197 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2198 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2199
David S. Miller555353c2008-07-08 17:33:13 -07002200 rxq = &dev->rx_queue;
2201
David S. Miller83874002008-07-17 00:53:03 -07002202 q = rxq->qdisc;
David S. Miller8d50b532008-07-30 02:37:46 -07002203 if (q != &noop_qdisc) {
David S. Miller83874002008-07-17 00:53:03 -07002204 spin_lock(qdisc_lock(q));
David S. Millera9312ae2008-08-17 21:51:03 -07002205 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2206 result = qdisc_enqueue_root(skb, q);
David S. Miller83874002008-07-17 00:53:03 -07002207 spin_unlock(qdisc_lock(q));
2208 }
Herbert Xuf697c3e2007-10-14 00:38:47 -07002209
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 return result;
2211}
Herbert Xuf697c3e2007-10-14 00:38:47 -07002212
2213static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2214 struct packet_type **pt_prev,
2215 int *ret, struct net_device *orig_dev)
2216{
David S. Miller8d50b532008-07-30 02:37:46 -07002217 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
Herbert Xuf697c3e2007-10-14 00:38:47 -07002218 goto out;
2219
2220 if (*pt_prev) {
2221 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2222 *pt_prev = NULL;
2223 } else {
2224 /* Huh? Why does turning on AF_PACKET affect this? */
2225 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2226 }
2227
2228 switch (ing_filter(skb)) {
2229 case TC_ACT_SHOT:
2230 case TC_ACT_STOLEN:
2231 kfree_skb(skb);
2232 return NULL;
2233 }
2234
2235out:
2236 skb->tc_verd = 0;
2237 return skb;
2238}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239#endif
2240
Patrick McHardybc1d0412008-07-14 22:49:30 -07002241/*
2242 * netif_nit_deliver - deliver received packets to network taps
2243 * @skb: buffer
2244 *
2245 * This function is used to deliver incoming packets to network
2246 * taps. It should be used when the normal netif_receive_skb path
2247 * is bypassed, for example because of VLAN acceleration.
2248 */
2249void netif_nit_deliver(struct sk_buff *skb)
2250{
2251 struct packet_type *ptype;
2252
2253 if (list_empty(&ptype_all))
2254 return;
2255
2256 skb_reset_network_header(skb);
2257 skb_reset_transport_header(skb);
2258 skb->mac_len = skb->network_header - skb->mac_header;
2259
2260 rcu_read_lock();
2261 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2262 if (!ptype->dev || ptype->dev == skb->dev)
2263 deliver_skb(skb, ptype, skb->dev);
2264 }
2265 rcu_read_unlock();
2266}
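/*
 * Illustrative sketch (an assumption, not taken from this file): a
 * VLAN-accelerated receive path that bypasses netif_receive_skb() can
 * still feed taps such as packet sockets before continuing:
 *
 *	netif_nit_deliver(skb);
 *	... proceed on the accelerated path ...
 */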
2267
Stephen Hemminger3b582cc2007-11-01 02:21:47 -07002268/**
2269 * netif_receive_skb - process receive buffer from network
2270 * @skb: buffer to process
2271 *
2272 * netif_receive_skb() is the main receive data processing function.
2273 * It always succeeds. The buffer may be dropped during processing
2274 * for congestion control or by the protocol layers.
2275 *
2276 * This function may only be called from softirq context and interrupts
2277 * should be enabled.
2278 *
2279 * Return values (usually ignored):
2280 * NET_RX_SUCCESS: no congestion
2281 * NET_RX_DROP: packet was dropped
2282 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283int netif_receive_skb(struct sk_buff *skb)
2284{
2285 struct packet_type *ptype, *pt_prev;
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002286 struct net_device *orig_dev;
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002287 struct net_device *null_or_orig;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 int ret = NET_RX_DROP;
Al Viro252e3342006-11-14 20:48:11 -08002289 __be16 type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290
Eric Dumazet81bbb3d2009-09-30 16:42:42 -07002291 if (!skb->tstamp.tv64)
2292 net_timestamp(skb);
2293
Patrick McHardy9b22ea52008-11-04 14:49:57 -08002294 if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
2295 return NET_RX_SUCCESS;
2296
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 /* if we've gotten here through NAPI, check netpoll */
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002298 if (netpoll_receive_skb(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 return NET_RX_DROP;
2300
Patrick McHardyc01003c2007-03-29 11:46:52 -07002301 if (!skb->iif)
2302 skb->iif = skb->dev->ifindex;
David S. Miller86e65da2005-08-09 19:36:29 -07002303
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002304 null_or_orig = NULL;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002305 orig_dev = skb->dev;
2306 if (orig_dev->master) {
Joe Eykholt0d7a3682008-07-02 18:22:01 -07002307 if (skb_bond_should_drop(skb))
2308 null_or_orig = orig_dev; /* deliver only exact match */
2309 else
2310 skb->dev = orig_dev->master;
Joe Eykholtcc9bd5c2008-07-02 18:22:00 -07002311 }
Jay Vosburgh8f903c72006-02-21 16:36:44 -08002312
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313 __get_cpu_var(netdev_rx_stat).total++;
2314
Arnaldo Carvalho de Meloc1d2bbe2007-04-10 20:45:18 -07002315 skb_reset_network_header(skb);
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002316 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melob0e380b2007-04-10 21:21:55 -07002317 skb->mac_len = skb->network_header - skb->mac_header;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318
2319 pt_prev = NULL;
2320
2321 rcu_read_lock();
2322
2323#ifdef CONFIG_NET_CLS_ACT
2324 if (skb->tc_verd & TC_NCLS) {
2325 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2326 goto ncls;
2327 }
2328#endif
2329
2330 list_for_each_entry_rcu(ptype, &ptype_all, list) {
Joe Eykholtf9823072008-07-02 18:22:02 -07002331 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2332 ptype->dev == orig_dev) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002333 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002334 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 pt_prev = ptype;
2336 }
2337 }
2338
2339#ifdef CONFIG_NET_CLS_ACT
Herbert Xuf697c3e2007-10-14 00:38:47 -07002340 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2341 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343ncls:
2344#endif
2345
Stephen Hemminger6229e362007-03-21 13:38:47 -07002346 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2347 if (!skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002348 goto out;
Patrick McHardyb863ceb2007-07-14 18:55:06 -07002349 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2350 if (!skb)
2351 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352
2353 type = skb->protocol;
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08002354 list_for_each_entry_rcu(ptype,
2355 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 if (ptype->type == type &&
Joe Eykholtf9823072008-07-02 18:22:02 -07002357 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2358 ptype->dev == orig_dev)) {
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002359 if (pt_prev)
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002360 ret = deliver_skb(skb, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 pt_prev = ptype;
2362 }
2363 }
2364
2365 if (pt_prev) {
David S. Millerf2ccd8f2005-08-09 19:34:12 -07002366 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 } else {
2368 kfree_skb(skb);
2369 /* Jamal, now you will not be able to escape explaining
2370 * to me how you were going to use this. :-)
2371 */
2372 ret = NET_RX_DROP;
2373 }
2374
2375out:
2376 rcu_read_unlock();
2377 return ret;
2378}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002379EXPORT_SYMBOL(netif_receive_skb);
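/*
 * Illustrative sketch, not part of the original file: a NAPI poll
 * routine delivers frames straight to the stack from softirq context.
 * example_adapter and example_fetch_rx() are hypothetical driver names;
 * the block is guarded by "#if 0" so it is never compiled.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *adapter = container_of(napi,
					struct example_adapter, napi);
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = example_fetch_rx(adapter))) {
		skb->protocol = eth_type_trans(skb, adapter->netdev);
		netif_receive_skb(skb);
		work++;
	}
	if (work < budget)
		napi_complete(napi);
	return work;
}
#endif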
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07002381/* Network device is going away, flush any packets still pending */
2382static void flush_backlog(void *arg)
2383{
2384 struct net_device *dev = arg;
2385 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2386 struct sk_buff *skb, *tmp;
2387
2388 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2389 if (skb->dev == dev) {
2390 __skb_unlink(skb, &queue->input_pkt_queue);
2391 kfree_skb(skb);
2392 }
2393}
2394
Herbert Xud565b0a2008-12-15 23:38:52 -08002395static int napi_gro_complete(struct sk_buff *skb)
2396{
2397 struct packet_type *ptype;
2398 __be16 type = skb->protocol;
2399 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2400 int err = -ENOENT;
2401
Herbert Xufc59f9a2009-04-14 15:11:06 -07002402 if (NAPI_GRO_CB(skb)->count == 1) {
2403 skb_shinfo(skb)->gso_size = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002404 goto out;
Herbert Xufc59f9a2009-04-14 15:11:06 -07002405 }
Herbert Xud565b0a2008-12-15 23:38:52 -08002406
2407 rcu_read_lock();
2408 list_for_each_entry_rcu(ptype, head, list) {
2409 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2410 continue;
2411
2412 err = ptype->gro_complete(skb);
2413 break;
2414 }
2415 rcu_read_unlock();
2416
2417 if (err) {
2418 WARN_ON(&ptype->list == head);
2419 kfree_skb(skb);
2420 return NET_RX_SUCCESS;
2421 }
2422
2423out:
Herbert Xud565b0a2008-12-15 23:38:52 -08002424 return netif_receive_skb(skb);
2425}
2426
2427void napi_gro_flush(struct napi_struct *napi)
2428{
2429 struct sk_buff *skb, *next;
2430
2431 for (skb = napi->gro_list; skb; skb = next) {
2432 next = skb->next;
2433 skb->next = NULL;
2434 napi_gro_complete(skb);
2435 }
2436
Herbert Xu4ae55442009-02-08 18:00:36 +00002437 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002438 napi->gro_list = NULL;
2439}
2440EXPORT_SYMBOL(napi_gro_flush);
2441
Herbert Xu96e93ea2009-01-06 10:49:34 -08002442int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
Herbert Xud565b0a2008-12-15 23:38:52 -08002443{
2444 struct sk_buff **pp = NULL;
2445 struct packet_type *ptype;
2446 __be16 type = skb->protocol;
2447 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
Herbert Xu0da2afd52008-12-26 14:57:42 -08002448 int same_flow;
Herbert Xud565b0a2008-12-15 23:38:52 -08002449 int mac_len;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002450 int ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002451
2452 if (!(skb->dev->features & NETIF_F_GRO))
2453 goto normal;
2454
David S. Miller4cf704f2009-06-09 00:18:51 -07002455 if (skb_is_gso(skb) || skb_has_frags(skb))
Herbert Xuf17f5c92009-01-14 14:36:12 -08002456 goto normal;
2457
Herbert Xud565b0a2008-12-15 23:38:52 -08002458 rcu_read_lock();
2459 list_for_each_entry_rcu(ptype, head, list) {
Herbert Xud565b0a2008-12-15 23:38:52 -08002460 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2461 continue;
2462
Herbert Xu86911732009-01-29 14:19:50 +00002463 skb_set_network_header(skb, skb_gro_offset(skb));
Herbert Xud565b0a2008-12-15 23:38:52 -08002464 mac_len = skb->network_header - skb->mac_header;
2465 skb->mac_len = mac_len;
2466 NAPI_GRO_CB(skb)->same_flow = 0;
2467 NAPI_GRO_CB(skb)->flush = 0;
Herbert Xu5d38a072009-01-04 16:13:40 -08002468 NAPI_GRO_CB(skb)->free = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002469
Herbert Xud565b0a2008-12-15 23:38:52 -08002470 pp = ptype->gro_receive(&napi->gro_list, skb);
2471 break;
2472 }
2473 rcu_read_unlock();
2474
2475 if (&ptype->list == head)
2476 goto normal;
2477
Herbert Xu0da2afd52008-12-26 14:57:42 -08002478 same_flow = NAPI_GRO_CB(skb)->same_flow;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002479 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
Herbert Xu0da2afd52008-12-26 14:57:42 -08002480
Herbert Xud565b0a2008-12-15 23:38:52 -08002481 if (pp) {
2482 struct sk_buff *nskb = *pp;
2483
2484 *pp = nskb->next;
2485 nskb->next = NULL;
2486 napi_gro_complete(nskb);
Herbert Xu4ae55442009-02-08 18:00:36 +00002487 napi->gro_count--;
Herbert Xud565b0a2008-12-15 23:38:52 -08002488 }
2489
Herbert Xu0da2afd52008-12-26 14:57:42 -08002490 if (same_flow)
Herbert Xud565b0a2008-12-15 23:38:52 -08002491 goto ok;
2492
Herbert Xu4ae55442009-02-08 18:00:36 +00002493 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
Herbert Xud565b0a2008-12-15 23:38:52 -08002494 goto normal;
Herbert Xud565b0a2008-12-15 23:38:52 -08002495
Herbert Xu4ae55442009-02-08 18:00:36 +00002496 napi->gro_count++;
Herbert Xud565b0a2008-12-15 23:38:52 -08002497 NAPI_GRO_CB(skb)->count = 1;
Herbert Xu86911732009-01-29 14:19:50 +00002498 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002499 skb->next = napi->gro_list;
2500 napi->gro_list = skb;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002501 ret = GRO_HELD;
Herbert Xud565b0a2008-12-15 23:38:52 -08002502
Herbert Xuad0f9902009-02-01 01:24:55 -08002503pull:
Herbert Xucb189782009-05-26 18:50:31 +00002504 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2505 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2506
2507 BUG_ON(skb->end - skb->tail < grow);
2508
2509 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2510
2511 skb->tail += grow;
2512 skb->data_len -= grow;
2513
2514 skb_shinfo(skb)->frags[0].page_offset += grow;
2515 skb_shinfo(skb)->frags[0].size -= grow;
2516
2517 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2518 put_page(skb_shinfo(skb)->frags[0].page);
2519 memmove(skb_shinfo(skb)->frags,
2520 skb_shinfo(skb)->frags + 1,
2521 --skb_shinfo(skb)->nr_frags);
2522 }
Herbert Xuad0f9902009-02-01 01:24:55 -08002523 }
2524
Herbert Xud565b0a2008-12-15 23:38:52 -08002525ok:
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002526 return ret;
Herbert Xud565b0a2008-12-15 23:38:52 -08002527
2528normal:
Herbert Xuad0f9902009-02-01 01:24:55 -08002529 ret = GRO_NORMAL;
2530 goto pull;
Herbert Xu5d38a072009-01-04 16:13:40 -08002531}
Herbert Xu96e93ea2009-01-06 10:49:34 -08002532EXPORT_SYMBOL(dev_gro_receive);
2533
2534static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2535{
2536 struct sk_buff *p;
2537
Herbert Xud1c76af2009-03-16 10:50:02 -07002538 if (netpoll_rx_on(skb))
2539 return GRO_NORMAL;
2540
Herbert Xu96e93ea2009-01-06 10:49:34 -08002541 for (p = napi->gro_list; p; p = p->next) {
Stephen Hemmingerf2bde732009-04-01 11:20:20 +00002542 NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
2543 && !compare_ether_header(skb_mac_header(p),
2544 skb_gro_mac_header(skb));
Herbert Xu96e93ea2009-01-06 10:49:34 -08002545 NAPI_GRO_CB(p)->flush = 0;
2546 }
2547
2548 return dev_gro_receive(napi, skb);
2549}
Herbert Xu5d38a072009-01-04 16:13:40 -08002550
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002551int napi_skb_finish(int ret, struct sk_buff *skb)
Herbert Xu5d38a072009-01-04 16:13:40 -08002552{
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002553 int err = NET_RX_SUCCESS;
2554
2555 switch (ret) {
2556 case GRO_NORMAL:
Herbert Xu5d38a072009-01-04 16:13:40 -08002557 return netif_receive_skb(skb);
2558
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002559 case GRO_DROP:
2560 err = NET_RX_DROP;
2561 /* fall through */
2562
2563 case GRO_MERGED_FREE:
Herbert Xu5d38a072009-01-04 16:13:40 -08002564 kfree_skb(skb);
2565 break;
2566 }
2567
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002568 return err;
2569}
2570EXPORT_SYMBOL(napi_skb_finish);
2571
Herbert Xu78a478d2009-05-26 18:50:21 +00002572void skb_gro_reset_offset(struct sk_buff *skb)
2573{
2574 NAPI_GRO_CB(skb)->data_offset = 0;
2575 NAPI_GRO_CB(skb)->frag0 = NULL;
Herbert Xu74895942009-05-26 18:50:27 +00002576 NAPI_GRO_CB(skb)->frag0_len = 0;
Herbert Xu78a478d2009-05-26 18:50:21 +00002577
Herbert Xu78d3fd02009-05-26 18:50:23 +00002578 if (skb->mac_header == skb->tail &&
Herbert Xu74895942009-05-26 18:50:27 +00002579 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
Herbert Xu78a478d2009-05-26 18:50:21 +00002580 NAPI_GRO_CB(skb)->frag0 =
2581 page_address(skb_shinfo(skb)->frags[0].page) +
2582 skb_shinfo(skb)->frags[0].page_offset;
Herbert Xu74895942009-05-26 18:50:27 +00002583 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
2584 }
Herbert Xu78a478d2009-05-26 18:50:21 +00002585}
2586EXPORT_SYMBOL(skb_gro_reset_offset);
2587
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002588int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2589{
Herbert Xu86911732009-01-29 14:19:50 +00002590 skb_gro_reset_offset(skb);
2591
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002592 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
Herbert Xud565b0a2008-12-15 23:38:52 -08002593}
2594EXPORT_SYMBOL(napi_gro_receive);
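/*
 * Illustrative sketch (an assumption, not taken from this file): a
 * GRO-aware driver substitutes this for netif_receive_skb() in its
 * poll routine so consecutive packets of one flow can be merged:
 *
 *	skb->protocol = eth_type_trans(skb, adapter->netdev);
 *	napi_gro_receive(&adapter->napi, skb);
 */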
2595
Herbert Xu96e93ea2009-01-06 10:49:34 -08002596void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2597{
Herbert Xu96e93ea2009-01-06 10:49:34 -08002598 __skb_pull(skb, skb_headlen(skb));
2599 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2600
2601 napi->skb = skb;
2602}
2603EXPORT_SYMBOL(napi_reuse_skb);
2604
Herbert Xu76620aa2009-04-16 02:02:07 -07002605struct sk_buff *napi_get_frags(struct napi_struct *napi)
Herbert Xu5d38a072009-01-04 16:13:40 -08002606{
2607 struct net_device *dev = napi->dev;
2608 struct sk_buff *skb = napi->skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002609
2610 if (!skb) {
2611 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2612 if (!skb)
2613 goto out;
2614
2615 skb_reserve(skb, NET_IP_ALIGN);
Herbert Xu76620aa2009-04-16 02:02:07 -07002616
2617 napi->skb = skb;
Herbert Xu5d38a072009-01-04 16:13:40 -08002618 }
2619
Herbert Xu96e93ea2009-01-06 10:49:34 -08002620out:
2621 return skb;
2622}
Herbert Xu76620aa2009-04-16 02:02:07 -07002623EXPORT_SYMBOL(napi_get_frags);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002624
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002625int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2626{
2627 int err = NET_RX_SUCCESS;
2628
2629 switch (ret) {
2630 case GRO_NORMAL:
Herbert Xu86911732009-01-29 14:19:50 +00002631 case GRO_HELD:
Herbert Xu86911732009-01-29 14:19:50 +00002632 skb->protocol = eth_type_trans(skb, napi->dev);
2633
2634 if (ret == GRO_NORMAL)
2635 return netif_receive_skb(skb);
2636
2637 skb_gro_pull(skb, -ETH_HLEN);
2638 break;
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002639
2640 case GRO_DROP:
2641 err = NET_RX_DROP;
2642 /* fall through */
2643
2644 case GRO_MERGED_FREE:
2645 napi_reuse_skb(napi, skb);
2646 break;
2647 }
2648
2649 return err;
2650}
2651EXPORT_SYMBOL(napi_frags_finish);
2652
Herbert Xu76620aa2009-04-16 02:02:07 -07002653struct sk_buff *napi_frags_skb(struct napi_struct *napi)
Herbert Xu96e93ea2009-01-06 10:49:34 -08002654{
Herbert Xu76620aa2009-04-16 02:02:07 -07002655 struct sk_buff *skb = napi->skb;
2656 struct ethhdr *eth;
Herbert Xua5b1cf22009-05-26 18:50:28 +00002657 unsigned int hlen;
2658 unsigned int off;
Herbert Xu76620aa2009-04-16 02:02:07 -07002659
2660 napi->skb = NULL;
2661
2662 skb_reset_mac_header(skb);
2663 skb_gro_reset_offset(skb);
2664
Herbert Xua5b1cf22009-05-26 18:50:28 +00002665 off = skb_gro_offset(skb);
2666 hlen = off + sizeof(*eth);
2667 eth = skb_gro_header_fast(skb, off);
2668 if (skb_gro_header_hard(skb, hlen)) {
2669 eth = skb_gro_header_slow(skb, hlen, off);
2670 if (unlikely(!eth)) {
2671 napi_reuse_skb(napi, skb);
2672 skb = NULL;
2673 goto out;
2674 }
Herbert Xu76620aa2009-04-16 02:02:07 -07002675 }
2676
2677 skb_gro_pull(skb, sizeof(*eth));
2678
2679 /*
2680 * This works because the only protocols we care about don't require
2681 * special handling. We'll fix it up properly at the end.
2682 */
2683 skb->protocol = eth->h_proto;
2684
2685out:
2686 return skb;
2687}
2688EXPORT_SYMBOL(napi_frags_skb);
2689
2690int napi_gro_frags(struct napi_struct *napi)
2691{
2692 struct sk_buff *skb = napi_frags_skb(napi);
Herbert Xu96e93ea2009-01-06 10:49:34 -08002693
2694 if (!skb)
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002695 return NET_RX_DROP;
Herbert Xu96e93ea2009-01-06 10:49:34 -08002696
Herbert Xu5d0d9be2009-01-29 14:19:48 +00002697 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
Herbert Xu5d38a072009-01-04 16:13:40 -08002698}
2699EXPORT_SYMBOL(napi_gro_frags);
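/*
 * Illustrative sketch (not part of this file): the frag-based GRO entry
 * point.  A driver takes the pre-allocated skb from napi_get_frags(),
 * attaches its receive page, and hands it to napi_gro_frags(), which
 * recovers the Ethernet header itself via napi_frags_skb().  The field
 * names match the skb_frag_t layout of this kernel generation; the
 * page/len arguments are hypothetical driver state.
 *
 *	static void my_rx_page(struct napi_struct *napi, struct page *page,
 *			       unsigned int len)
 *	{
 *		struct sk_buff *skb = napi_get_frags(napi);
 *
 *		if (!skb)
 *			return;	/* a real driver would recycle the page here */
 *
 *		skb_shinfo(skb)->frags[0].page = page;
 *		skb_shinfo(skb)->frags[0].page_offset = 0;
 *		skb_shinfo(skb)->frags[0].size = len;
 *		skb_shinfo(skb)->nr_frags = 1;
 *		skb->len += len;
 *		skb->data_len += len;
 *		skb->truesize += len;
 *
 *		napi_gro_frags(napi);
 *	}
 */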
2700
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002701static int process_backlog(struct napi_struct *napi, int quota)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702{
2703 int work = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2705 unsigned long start_time = jiffies;
2706
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002707 napi->weight = weight_p;
2708 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710
2711 local_irq_disable();
2712 skb = __skb_dequeue(&queue->input_pkt_queue);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002713 if (!skb) {
Herbert Xu8f1ead22009-03-26 00:59:10 -07002714 __napi_complete(napi);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002715 local_irq_enable();
Herbert Xu8f1ead22009-03-26 00:59:10 -07002716 break;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002717 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 local_irq_enable();
2719
Herbert Xu8f1ead22009-03-26 00:59:10 -07002720 netif_receive_skb(skb);
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002721 } while (++work < quota && jiffies == start_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002723 return work;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724}
2725
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002726/**
2727 * __napi_schedule - schedule for receive
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07002728 * @n: entry to schedule
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002729 *
2730 * The entry's receive function will be scheduled to run
2731 */
Harvey Harrisonb5606c22008-02-13 15:03:16 -08002732void __napi_schedule(struct napi_struct *n)
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002733{
2734 unsigned long flags;
2735
2736 local_irq_save(flags);
2737 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2738 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2739 local_irq_restore(flags);
2740}
2741EXPORT_SYMBOL(__napi_schedule);
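/*
 * Illustrative sketch (not part of this file): the usual interrupt-side
 * pairing.  napi_schedule_prep() atomically claims NAPI_STATE_SCHED, so
 * only one CPU queues the instance; __napi_schedule() then puts it on
 * this CPU's poll list.  my_disable_rx_irq() and struct my_priv are
 * hypothetical.
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			my_disable_rx_irq(priv);
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */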
2742
Herbert Xud565b0a2008-12-15 23:38:52 -08002743void __napi_complete(struct napi_struct *n)
2744{
2745 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2746 BUG_ON(n->gro_list);
2747
2748 list_del(&n->poll_list);
2749 smp_mb__before_clear_bit();
2750 clear_bit(NAPI_STATE_SCHED, &n->state);
2751}
2752EXPORT_SYMBOL(__napi_complete);
2753
2754void napi_complete(struct napi_struct *n)
2755{
2756 unsigned long flags;
2757
2758 /*
2759 * don't let napi dequeue from the cpu poll list
2760	 * just in case it's running on a different cpu
2761 */
2762 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2763 return;
2764
2765 napi_gro_flush(n);
2766 local_irq_save(flags);
2767 __napi_complete(n);
2768 local_irq_restore(flags);
2769}
2770EXPORT_SYMBOL(napi_complete);
2771
2772void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2773 int (*poll)(struct napi_struct *, int), int weight)
2774{
2775 INIT_LIST_HEAD(&napi->poll_list);
Herbert Xu4ae55442009-02-08 18:00:36 +00002776 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002777 napi->gro_list = NULL;
Herbert Xu5d38a072009-01-04 16:13:40 -08002778 napi->skb = NULL;
Herbert Xud565b0a2008-12-15 23:38:52 -08002779 napi->poll = poll;
2780 napi->weight = weight;
2781 list_add(&napi->dev_list, &dev->napi_list);
Herbert Xud565b0a2008-12-15 23:38:52 -08002782 napi->dev = dev;
Herbert Xu5d38a072009-01-04 16:13:40 -08002783#ifdef CONFIG_NETPOLL
Herbert Xud565b0a2008-12-15 23:38:52 -08002784 spin_lock_init(&napi->poll_lock);
2785 napi->poll_owner = -1;
2786#endif
2787 set_bit(NAPI_STATE_SCHED, &napi->state);
2788}
2789EXPORT_SYMBOL(netif_napi_add);
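/*
 * Illustrative sketch (not part of this file): drivers register their
 * poll routine once at probe time; 64 is the conventional weight.
 * my_probe() and my_poll() are hypothetical.
 *
 *	static int my_probe(struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		netif_napi_add(dev, &priv->napi, my_poll, 64);
 *		return register_netdev(dev);
 *	}
 */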
2790
2791void netif_napi_del(struct napi_struct *napi)
2792{
2793 struct sk_buff *skb, *next;
2794
Peter P Waskiewicz Jrd7b06632008-12-26 01:35:35 -08002795 list_del_init(&napi->dev_list);
Herbert Xu76620aa2009-04-16 02:02:07 -07002796 napi_free_frags(napi);
Herbert Xud565b0a2008-12-15 23:38:52 -08002797
2798 for (skb = napi->gro_list; skb; skb = next) {
2799 next = skb->next;
2800 skb->next = NULL;
2801 kfree_skb(skb);
2802 }
2803
2804 napi->gro_list = NULL;
Herbert Xu4ae55442009-02-08 18:00:36 +00002805 napi->gro_count = 0;
Herbert Xud565b0a2008-12-15 23:38:52 -08002806}
2807EXPORT_SYMBOL(netif_napi_del);
2808
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002809
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810static void net_rx_action(struct softirq_action *h)
2811{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002812 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002813 unsigned long time_limit = jiffies + 2;
Stephen Hemminger51b0bde2005-06-23 20:14:40 -07002814 int budget = netdev_budget;
Matt Mackall53fb95d2005-08-11 19:27:43 -07002815 void *have;
2816
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817 local_irq_disable();
2818
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002819 while (!list_empty(list)) {
2820 struct napi_struct *n;
2821 int work, weight;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002823		/* If the softirq window is exhausted then punt.
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002824		 * Allow this to run for 2 jiffies, which allows
2825 * an average latency of 1.5/HZ.
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002826 */
Stephen Hemminger24f8b232008-11-03 17:14:38 -08002827 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 goto softnet_break;
2829
2830 local_irq_enable();
2831
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002832 /* Even though interrupts have been re-enabled, this
2833 * access is safe because interrupts can only add new
2834 * entries to the tail of this list, and only ->poll()
2835 * calls can remove this head entry from the list.
2836 */
2837 n = list_entry(list->next, struct napi_struct, poll_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002839 have = netpoll_poll_lock(n);
2840
2841 weight = n->weight;
2842
David S. Miller0a7606c2007-10-29 21:28:47 -07002843 /* This NAPI_STATE_SCHED test is for avoiding a race
2844 * with netpoll's poll_napi(). Only the entity which
2845 * obtains the lock and sees NAPI_STATE_SCHED set will
2846 * actually make the ->poll() call. Therefore we avoid
2847		 * accidentally calling ->poll() when NAPI is not scheduled.
2848 */
2849 work = 0;
Neil Horman4ea7e382009-05-21 07:36:08 +00002850 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
David S. Miller0a7606c2007-10-29 21:28:47 -07002851 work = n->poll(n, weight);
Neil Horman4ea7e382009-05-21 07:36:08 +00002852 trace_napi_poll(n);
2853 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002854
2855 WARN_ON_ONCE(work > weight);
2856
2857 budget -= work;
2858
2859 local_irq_disable();
2860
2861 /* Drivers must not modify the NAPI state if they
2862 * consume the entire weight. In such cases this code
2863 * still "owns" the NAPI instance and therefore can
2864 * move the instance around on the list at-will.
2865 */
David S. Millerfed17f32008-01-07 21:00:40 -08002866 if (unlikely(work == weight)) {
Herbert Xuff780cd2009-06-26 19:27:04 -07002867 if (unlikely(napi_disable_pending(n))) {
2868 local_irq_enable();
2869 napi_complete(n);
2870 local_irq_disable();
2871 } else
David S. Millerfed17f32008-01-07 21:00:40 -08002872 list_move_tail(&n->poll_list, list);
2873 }
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002874
2875 netpoll_poll_unlock(have);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876 }
2877out:
Shannon Nelson515e06c2007-06-23 23:09:23 -07002878 local_irq_enable();
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002879
Chris Leechdb217332006-06-17 21:24:58 -07002880#ifdef CONFIG_NET_DMA
2881 /*
2882 * There may not be any more sk_buffs coming right now, so push
2883 * any pending DMA copies to hardware
2884 */
Dan Williams2ba05622009-01-06 11:38:14 -07002885 dma_issue_pending_all();
Chris Leechdb217332006-06-17 21:24:58 -07002886#endif
Stephen Hemmingerbea33482007-10-03 16:41:36 -07002887
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 return;
2889
2890softnet_break:
2891 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2892 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2893 goto out;
2894}
2895
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002896static gifconf_func_t *gifconf_list[NPROTO];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897
2898/**
2899 * register_gifconf - register a SIOCGIF handler
2900 * @family: Address family
2901 * @gifconf: Function handler
2902 *
2903 * Register protocol dependent address dumping routines. The handler
2904 * that is passed must not be freed or reused until it has been replaced
2905 * by another handler.
2906 */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002907int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002908{
2909 if (family >= NPROTO)
2910 return -EINVAL;
2911 gifconf_list[family] = gifconf;
2912 return 0;
2913}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07002914EXPORT_SYMBOL(register_gifconf);
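/*
 * Illustrative sketch (not part of this file): a protocol registers its
 * SIOCGIFCONF dumper once at init time, keyed by address family; IPv4
 * does this with inet_gifconf() from devinet.c.  The my_gifconf()
 * handler below is hypothetical and follows the gifconf_func_t
 * convention (return bytes written, or the space needed when the
 * buffer pointer is NULL).
 *
 *	static int my_gifconf(struct net_device *dev, char __user *buf,
 *			      int len)
 *	{
 *		return 0;	/* nothing to report in this sketch */
 *	}
 *
 *	static int __init my_proto_init(void)
 *	{
 *		return register_gifconf(PF_INET, my_gifconf);
 *	}
 */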
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915
2916
2917/*
2918 * Map an interface index to its name (SIOCGIFNAME)
2919 */
2920
2921/*
2922 * We need this ioctl for efficient implementation of the
2923 * if_indextoname() function required by the IPv6 API. Without
2924 * it, we would have to search all the interfaces to find a
2925 * match. --pb
2926 */
2927
Eric W. Biederman881d9662007-09-17 11:56:21 -07002928static int dev_ifname(struct net *net, struct ifreq __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929{
2930 struct net_device *dev;
2931 struct ifreq ifr;
2932
2933 /*
2934 * Fetch the caller's info block.
2935 */
2936
2937 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2938 return -EFAULT;
2939
2940 read_lock(&dev_base_lock);
Eric W. Biederman881d9662007-09-17 11:56:21 -07002941 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942 if (!dev) {
2943 read_unlock(&dev_base_lock);
2944 return -ENODEV;
2945 }
2946
2947 strcpy(ifr.ifr_name, dev->name);
2948 read_unlock(&dev_base_lock);
2949
2950 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2951 return -EFAULT;
2952 return 0;
2953}
2954
2955/*
2956 * Perform a SIOCGIFCONF call. This structure will change
2957 * size eventually, and there is nothing I can do about it.
2958 * Thus we will need a 'compatibility mode'.
2959 */
2960
Eric W. Biederman881d9662007-09-17 11:56:21 -07002961static int dev_ifconf(struct net *net, char __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962{
2963 struct ifconf ifc;
2964 struct net_device *dev;
2965 char __user *pos;
2966 int len;
2967 int total;
2968 int i;
2969
2970 /*
2971 * Fetch the caller's info block.
2972 */
2973
2974 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2975 return -EFAULT;
2976
2977 pos = ifc.ifc_buf;
2978 len = ifc.ifc_len;
2979
2980 /*
2981 * Loop over the interfaces, and write an info block for each.
2982 */
2983
2984 total = 0;
Eric W. Biederman881d9662007-09-17 11:56:21 -07002985 for_each_netdev(net, dev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 for (i = 0; i < NPROTO; i++) {
2987 if (gifconf_list[i]) {
2988 int done;
2989 if (!pos)
2990 done = gifconf_list[i](dev, NULL, 0);
2991 else
2992 done = gifconf_list[i](dev, pos + total,
2993 len - total);
2994 if (done < 0)
2995 return -EFAULT;
2996 total += done;
2997 }
2998 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09002999 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000
3001 /*
3002 * All done. Write the updated control block back to the caller.
3003 */
3004 ifc.ifc_len = total;
3005
3006 /*
3007 * Both BSD and Solaris return 0 here, so we do too.
3008 */
3009 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3010}
3011
3012#ifdef CONFIG_PROC_FS
3013/*
3014 * This is invoked by the /proc filesystem handler to display a device
3015 * in detail.
3016 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017void *dev_seq_start(struct seq_file *seq, loff_t *pos)
Eric Dumazet9a429c42008-01-01 21:58:02 -08003018 __acquires(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019{
Denis V. Luneve372c412007-11-19 22:31:54 -08003020 struct net *net = seq_file_net(seq);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003021 loff_t off;
3022 struct net_device *dev;
3023
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 read_lock(&dev_base_lock);
Pavel Emelianov7562f872007-05-03 15:13:45 -07003025 if (!*pos)
3026 return SEQ_START_TOKEN;
3027
3028 off = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003029 for_each_netdev(net, dev)
Pavel Emelianov7562f872007-05-03 15:13:45 -07003030 if (off++ == *pos)
3031 return dev;
3032
3033 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003034}
3035
3036void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3037{
Denis V. Luneve372c412007-11-19 22:31:54 -08003038 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 ++*pos;
Pavel Emelianov7562f872007-05-03 15:13:45 -07003040 return v == SEQ_START_TOKEN ?
Eric W. Biederman881d9662007-09-17 11:56:21 -07003041 first_net_device(net) : next_net_device((struct net_device *)v);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042}
3043
3044void dev_seq_stop(struct seq_file *seq, void *v)
Eric Dumazet9a429c42008-01-01 21:58:02 -08003045 __releases(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046{
3047 read_unlock(&dev_base_lock);
3048}
3049
3050static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3051{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08003052 const struct net_device_stats *stats = dev_get_stats(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053
Rusty Russell5a1b5892007-04-28 21:04:03 -07003054 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3055 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3056 dev->name, stats->rx_bytes, stats->rx_packets,
3057 stats->rx_errors,
3058 stats->rx_dropped + stats->rx_missed_errors,
3059 stats->rx_fifo_errors,
3060 stats->rx_length_errors + stats->rx_over_errors +
3061 stats->rx_crc_errors + stats->rx_frame_errors,
3062 stats->rx_compressed, stats->multicast,
3063 stats->tx_bytes, stats->tx_packets,
3064 stats->tx_errors, stats->tx_dropped,
3065 stats->tx_fifo_errors, stats->collisions,
3066 stats->tx_carrier_errors +
3067 stats->tx_aborted_errors +
3068 stats->tx_window_errors +
3069 stats->tx_heartbeat_errors,
3070 stats->tx_compressed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071}
3072
3073/*
3074 * Called from the PROCfs module. This now uses the new arbitrary sized
3075 * /proc/net interface to create /proc/net/dev
3076 */
3077static int dev_seq_show(struct seq_file *seq, void *v)
3078{
3079 if (v == SEQ_START_TOKEN)
3080 seq_puts(seq, "Inter-| Receive "
3081 " | Transmit\n"
3082 " face |bytes packets errs drop fifo frame "
3083 "compressed multicast|bytes packets errs "
3084 "drop fifo colls carrier compressed\n");
3085 else
3086 dev_seq_printf_stats(seq, v);
3087 return 0;
3088}
3089
3090static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3091{
3092 struct netif_rx_stats *rc = NULL;
3093
Mike Travis0c0b0ac2008-05-02 16:43:08 -07003094 while (*pos < nr_cpu_ids)
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003095 if (cpu_online(*pos)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096 rc = &per_cpu(netdev_rx_stat, *pos);
3097 break;
3098 } else
3099 ++*pos;
3100 return rc;
3101}
3102
3103static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3104{
3105 return softnet_get_online(pos);
3106}
3107
3108static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3109{
3110 ++*pos;
3111 return softnet_get_online(pos);
3112}
3113
3114static void softnet_seq_stop(struct seq_file *seq, void *v)
3115{
3116}
3117
3118static int softnet_seq_show(struct seq_file *seq, void *v)
3119{
3120 struct netif_rx_stats *s = v;
3121
3122 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
Stephen Hemminger31aa02c2005-06-23 20:12:48 -07003123 s->total, s->dropped, s->time_squeeze, 0,
Stephen Hemmingerc1ebcdb2005-06-23 20:08:59 -07003124 0, 0, 0, 0, /* was fastroute */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003125 s->cpu_collision);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003126 return 0;
3127}
3128
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003129static const struct seq_operations dev_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130 .start = dev_seq_start,
3131 .next = dev_seq_next,
3132 .stop = dev_seq_stop,
3133 .show = dev_seq_show,
3134};
3135
3136static int dev_seq_open(struct inode *inode, struct file *file)
3137{
Denis V. Luneve372c412007-11-19 22:31:54 -08003138 return seq_open_net(inode, file, &dev_seq_ops,
3139 sizeof(struct seq_net_private));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003140}
3141
Arjan van de Ven9a321442007-02-12 00:55:35 -08003142static const struct file_operations dev_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143 .owner = THIS_MODULE,
3144 .open = dev_seq_open,
3145 .read = seq_read,
3146 .llseek = seq_lseek,
Denis V. Luneve372c412007-11-19 22:31:54 -08003147 .release = seq_release_net,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148};
3149
Stephen Hemmingerf6908082007-03-12 14:34:29 -07003150static const struct seq_operations softnet_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151 .start = softnet_seq_start,
3152 .next = softnet_seq_next,
3153 .stop = softnet_seq_stop,
3154 .show = softnet_seq_show,
3155};
3156
3157static int softnet_seq_open(struct inode *inode, struct file *file)
3158{
3159 return seq_open(file, &softnet_seq_ops);
3160}
3161
Arjan van de Ven9a321442007-02-12 00:55:35 -08003162static const struct file_operations softnet_seq_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 .owner = THIS_MODULE,
3164 .open = softnet_seq_open,
3165 .read = seq_read,
3166 .llseek = seq_lseek,
3167 .release = seq_release,
3168};
3169
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003170static void *ptype_get_idx(loff_t pos)
3171{
3172 struct packet_type *pt = NULL;
3173 loff_t i = 0;
3174 int t;
3175
3176 list_for_each_entry_rcu(pt, &ptype_all, list) {
3177 if (i == pos)
3178 return pt;
3179 ++i;
3180 }
3181
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003182 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003183 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3184 if (i == pos)
3185 return pt;
3186 ++i;
3187 }
3188 }
3189 return NULL;
3190}
3191
3192static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003193 __acquires(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003194{
3195 rcu_read_lock();
3196 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3197}
3198
3199static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3200{
3201 struct packet_type *pt;
3202 struct list_head *nxt;
3203 int hash;
3204
3205 ++*pos;
3206 if (v == SEQ_START_TOKEN)
3207 return ptype_get_idx(0);
3208
3209 pt = v;
3210 nxt = pt->list.next;
3211 if (pt->type == htons(ETH_P_ALL)) {
3212 if (nxt != &ptype_all)
3213 goto found;
3214 hash = 0;
3215 nxt = ptype_base[0].next;
3216 } else
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003217 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003218
3219 while (nxt == &ptype_base[hash]) {
Pavel Emelyanov82d8a8672007-11-26 20:12:58 +08003220 if (++hash >= PTYPE_HASH_SIZE)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003221 return NULL;
3222 nxt = ptype_base[hash].next;
3223 }
3224found:
3225 return list_entry(nxt, struct packet_type, list);
3226}
3227
3228static void ptype_seq_stop(struct seq_file *seq, void *v)
Stephen Hemminger72348a42008-01-21 02:27:29 -08003229 __releases(RCU)
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003230{
3231 rcu_read_unlock();
3232}
3233
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003234static int ptype_seq_show(struct seq_file *seq, void *v)
3235{
3236 struct packet_type *pt = v;
3237
3238 if (v == SEQ_START_TOKEN)
3239 seq_puts(seq, "Type Device Function\n");
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09003240 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003241 if (pt->type == htons(ETH_P_ALL))
3242 seq_puts(seq, "ALL ");
3243 else
3244 seq_printf(seq, "%04x", ntohs(pt->type));
3245
Alexey Dobriyan908cd2d2008-11-16 19:50:35 -08003246 seq_printf(seq, " %-8s %pF\n",
3247 pt->dev ? pt->dev->name : "", pt->func);
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003248 }
3249
3250 return 0;
3251}
3252
3253static const struct seq_operations ptype_seq_ops = {
3254 .start = ptype_seq_start,
3255 .next = ptype_seq_next,
3256 .stop = ptype_seq_stop,
3257 .show = ptype_seq_show,
3258};
3259
3260static int ptype_seq_open(struct inode *inode, struct file *file)
3261{
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003262 return seq_open_net(inode, file, &ptype_seq_ops,
3263 sizeof(struct seq_net_private));
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003264}
3265
3266static const struct file_operations ptype_seq_fops = {
3267 .owner = THIS_MODULE,
3268 .open = ptype_seq_open,
3269 .read = seq_read,
3270 .llseek = seq_lseek,
Pavel Emelyanov2feb27d2008-03-24 14:57:45 -07003271 .release = seq_release_net,
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003272};
3273
3274
Pavel Emelyanov46650792007-10-08 20:38:39 -07003275static int __net_init dev_proc_net_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276{
3277 int rc = -ENOMEM;
3278
Eric W. Biederman881d9662007-09-17 11:56:21 -07003279 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280 goto out;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003281 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282 goto out_dev;
Eric W. Biederman881d9662007-09-17 11:56:21 -07003283 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003284 goto out_softnet;
Stephen Hemminger0e1256f2007-03-12 14:35:37 -07003285
Eric W. Biederman881d9662007-09-17 11:56:21 -07003286 if (wext_proc_init(net))
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003287 goto out_ptype;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288 rc = 0;
3289out:
3290 return rc;
Eric W. Biederman457c4cb2007-09-12 12:01:34 +02003291out_ptype:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003292 proc_net_remove(net, "ptype");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293out_softnet:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003294 proc_net_remove(net, "softnet_stat");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295out_dev:
Eric W. Biederman881d9662007-09-17 11:56:21 -07003296 proc_net_remove(net, "dev");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003297 goto out;
3298}
Eric W. Biederman881d9662007-09-17 11:56:21 -07003299
Pavel Emelyanov46650792007-10-08 20:38:39 -07003300static void __net_exit dev_proc_net_exit(struct net *net)
Eric W. Biederman881d9662007-09-17 11:56:21 -07003301{
3302 wext_proc_exit(net);
3303
3304 proc_net_remove(net, "ptype");
3305 proc_net_remove(net, "softnet_stat");
3306 proc_net_remove(net, "dev");
3307}
3308
Denis V. Lunev022cbae2007-11-13 03:23:50 -08003309static struct pernet_operations __net_initdata dev_proc_ops = {
Eric W. Biederman881d9662007-09-17 11:56:21 -07003310 .init = dev_proc_net_init,
3311 .exit = dev_proc_net_exit,
3312};
3313
3314static int __init dev_proc_init(void)
3315{
3316 return register_pernet_subsys(&dev_proc_ops);
3317}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003318#else
3319#define dev_proc_init() 0
3320#endif /* CONFIG_PROC_FS */
3321
3322
3323/**
3324 * netdev_set_master - set up master/slave pair
3325 * @slave: slave device
3326 * @master: new master device
3327 *
3328 * Changes the master device of the slave. Pass %NULL to break the
3329 * bonding. The caller must hold the RTNL semaphore. On a failure
3330 * a negative errno code is returned. On success the reference counts
3331 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3332 * function returns zero.
3333 */
3334int netdev_set_master(struct net_device *slave, struct net_device *master)
3335{
3336 struct net_device *old = slave->master;
3337
3338 ASSERT_RTNL();
3339
3340 if (master) {
3341 if (old)
3342 return -EBUSY;
3343 dev_hold(master);
3344 }
3345
3346 slave->master = master;
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09003347
Linus Torvalds1da177e2005-04-16 15:20:36 -07003348 synchronize_net();
3349
3350 if (old)
3351 dev_put(old);
3352
3353 if (master)
3354 slave->flags |= IFF_SLAVE;
3355 else
3356 slave->flags &= ~IFF_SLAVE;
3357
3358 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3359 return 0;
3360}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003361EXPORT_SYMBOL(netdev_set_master);
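/*
 * Illustrative sketch (not part of this file): how a bonding-style
 * driver might pair and unpair devices, always under the RTNL.  The
 * my_enslave()/my_release() helpers are hypothetical.
 *
 *	static int my_enslave(struct net_device *bond_dev,
 *			      struct net_device *slave_dev)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = netdev_set_master(slave_dev, bond_dev);
 *		rtnl_unlock();
 *		return err;
 *	}
 *
 *	static void my_release(struct net_device *slave_dev)
 *	{
 *		rtnl_lock();
 *		netdev_set_master(slave_dev, NULL);	/* break the pairing */
 *		rtnl_unlock();
 *	}
 */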
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003363static void dev_change_rx_flags(struct net_device *dev, int flags)
3364{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003365 const struct net_device_ops *ops = dev->netdev_ops;
3366
3367 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3368 ops->ndo_change_rx_flags(dev, flags);
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003369}
3370
Wang Chendad9b332008-06-18 01:48:28 -07003371static int __dev_set_promiscuity(struct net_device *dev, int inc)
Patrick McHardy4417da62007-06-27 01:28:10 -07003372{
3373 unsigned short old_flags = dev->flags;
David Howells8192b0c2008-11-14 10:39:10 +11003374 uid_t uid;
3375 gid_t gid;
Patrick McHardy4417da62007-06-27 01:28:10 -07003376
Patrick McHardy24023452007-07-14 18:51:31 -07003377 ASSERT_RTNL();
3378
Wang Chendad9b332008-06-18 01:48:28 -07003379 dev->flags |= IFF_PROMISC;
3380 dev->promiscuity += inc;
3381 if (dev->promiscuity == 0) {
3382 /*
3383 * Avoid overflow.
3384	 * If inc causes overflow, leave promisc untouched and return an error.
3385 */
3386 if (inc < 0)
3387 dev->flags &= ~IFF_PROMISC;
3388 else {
3389 dev->promiscuity -= inc;
3390 printk(KERN_WARNING "%s: promiscuity touches roof, "
3391 "set promiscuity failed, promiscuity feature "
3392 "of device might be broken.\n", dev->name);
3393 return -EOVERFLOW;
3394 }
3395 }
Patrick McHardy4417da62007-06-27 01:28:10 -07003396 if (dev->flags != old_flags) {
3397 printk(KERN_INFO "device %s %s promiscuous mode\n",
3398 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3399 "left");
David Howells8192b0c2008-11-14 10:39:10 +11003400 if (audit_enabled) {
3401 current_uid_gid(&uid, &gid);
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003402 audit_log(current->audit_context, GFP_ATOMIC,
3403 AUDIT_ANOM_PROMISCUOUS,
3404 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3405 dev->name, (dev->flags & IFF_PROMISC),
3406 (old_flags & IFF_PROMISC),
3407 audit_get_loginuid(current),
David Howells8192b0c2008-11-14 10:39:10 +11003408 uid, gid,
Klaus Heinrich Kiwi7759db82008-01-23 22:57:45 -05003409 audit_get_sessionid(current));
David Howells8192b0c2008-11-14 10:39:10 +11003410 }
Patrick McHardy24023452007-07-14 18:51:31 -07003411
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003412 dev_change_rx_flags(dev, IFF_PROMISC);
Patrick McHardy4417da62007-06-27 01:28:10 -07003413 }
Wang Chendad9b332008-06-18 01:48:28 -07003414 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003415}
3416
Linus Torvalds1da177e2005-04-16 15:20:36 -07003417/**
3418 * dev_set_promiscuity - update promiscuity count on a device
3419 * @dev: device
3420 * @inc: modifier
3421 *
Stephen Hemminger3041a062006-05-26 13:25:24 -07003422 * Add or remove promiscuity from a device. While the count in the device
Linus Torvalds1da177e2005-04-16 15:20:36 -07003423 * remains above zero the interface remains promiscuous. Once it hits zero
3424 * the device reverts back to normal filtering operation. A negative inc
3425 * value is used to drop promiscuity on the device.
Wang Chendad9b332008-06-18 01:48:28 -07003426 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427 */
Wang Chendad9b332008-06-18 01:48:28 -07003428int dev_set_promiscuity(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429{
3430 unsigned short old_flags = dev->flags;
Wang Chendad9b332008-06-18 01:48:28 -07003431 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432
Wang Chendad9b332008-06-18 01:48:28 -07003433 err = __dev_set_promiscuity(dev, inc);
Patrick McHardy4b5a6982008-07-06 15:49:08 -07003434 if (err < 0)
Wang Chendad9b332008-06-18 01:48:28 -07003435 return err;
Patrick McHardy4417da62007-06-27 01:28:10 -07003436 if (dev->flags != old_flags)
3437 dev_set_rx_mode(dev);
Wang Chendad9b332008-06-18 01:48:28 -07003438 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003440EXPORT_SYMBOL(dev_set_promiscuity);
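/*
 * Illustrative sketch (not part of this file): packet taps bump the
 * promiscuity count while bound and drop it on release, so several
 * independent users can coexist.  my_tap_attach()/my_tap_detach() are
 * hypothetical.
 *
 *	static int my_tap_attach(struct net_device *dev)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_set_promiscuity(dev, 1);	/* take one reference */
 *		rtnl_unlock();
 *		return err;
 *	}
 *
 *	static void my_tap_detach(struct net_device *dev)
 *	{
 *		rtnl_lock();
 *		dev_set_promiscuity(dev, -1);		/* drop our reference */
 *		rtnl_unlock();
 *	}
 */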
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441
3442/**
3443 * dev_set_allmulti - update allmulti count on a device
3444 * @dev: device
3445 * @inc: modifier
3446 *
3447 * Add or remove reception of all multicast frames to a device. While the
3448 * count in the device remains above zero the interface remains listening
3449 * to all multicast frames. Once it hits zero the device reverts back to normal
3450 * filtering operation. A negative @inc value is used to drop the counter
3451 * when releasing a resource needing all multicasts.
Wang Chendad9b332008-06-18 01:48:28 -07003452 * Return 0 if successful or a negative errno code on error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003453 */
3454
Wang Chendad9b332008-06-18 01:48:28 -07003455int dev_set_allmulti(struct net_device *dev, int inc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456{
3457 unsigned short old_flags = dev->flags;
3458
Patrick McHardy24023452007-07-14 18:51:31 -07003459 ASSERT_RTNL();
3460
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461 dev->flags |= IFF_ALLMULTI;
Wang Chendad9b332008-06-18 01:48:28 -07003462 dev->allmulti += inc;
3463 if (dev->allmulti == 0) {
3464 /*
3465 * Avoid overflow.
3466	 * If inc causes overflow, leave allmulti untouched and return an error.
3467 */
3468 if (inc < 0)
3469 dev->flags &= ~IFF_ALLMULTI;
3470 else {
3471 dev->allmulti -= inc;
3472 printk(KERN_WARNING "%s: allmulti touches roof, "
3473 "set allmulti failed, allmulti feature of "
3474 "device might be broken.\n", dev->name);
3475 return -EOVERFLOW;
3476 }
3477 }
Patrick McHardy24023452007-07-14 18:51:31 -07003478 if (dev->flags ^ old_flags) {
Patrick McHardyb6c40d62008-10-07 15:26:48 -07003479 dev_change_rx_flags(dev, IFF_ALLMULTI);
Patrick McHardy4417da62007-06-27 01:28:10 -07003480 dev_set_rx_mode(dev);
Patrick McHardy24023452007-07-14 18:51:31 -07003481 }
Wang Chendad9b332008-06-18 01:48:28 -07003482 return 0;
Patrick McHardy4417da62007-06-27 01:28:10 -07003483}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07003484EXPORT_SYMBOL(dev_set_allmulti);
Patrick McHardy4417da62007-06-27 01:28:10 -07003485
3486/*
3487 * Upload unicast and multicast address lists to device and
3488 * configure RX filtering. When the device doesn't support unicast
Joe Perches53ccaae2007-12-20 14:02:06 -08003489 * filtering it is put in promiscuous mode while unicast addresses
Patrick McHardy4417da62007-06-27 01:28:10 -07003490 * are present.
3491 */
3492void __dev_set_rx_mode(struct net_device *dev)
3493{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003494 const struct net_device_ops *ops = dev->netdev_ops;
3495
Patrick McHardy4417da62007-06-27 01:28:10 -07003496 /* dev_open will call this function so the list will stay sane. */
3497 if (!(dev->flags&IFF_UP))
3498 return;
3499
3500 if (!netif_device_present(dev))
YOSHIFUJI Hideaki40b77c92007-07-19 10:43:23 +09003501 return;
Patrick McHardy4417da62007-06-27 01:28:10 -07003502
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003503 if (ops->ndo_set_rx_mode)
3504 ops->ndo_set_rx_mode(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003505 else {
3506 /* Unicast addresses changes may only happen under the rtnl,
3507 * therefore calling __dev_set_promiscuity here is safe.
3508 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003509 if (dev->uc.count > 0 && !dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003510 __dev_set_promiscuity(dev, 1);
3511 dev->uc_promisc = 1;
Jiri Pirko31278e72009-06-17 01:12:19 +00003512 } else if (dev->uc.count == 0 && dev->uc_promisc) {
Patrick McHardy4417da62007-06-27 01:28:10 -07003513 __dev_set_promiscuity(dev, -1);
3514 dev->uc_promisc = 0;
3515 }
3516
Stephen Hemmingerd3147742008-11-19 21:32:24 -08003517 if (ops->ndo_set_multicast_list)
3518 ops->ndo_set_multicast_list(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003519 }
3520}
3521
3522void dev_set_rx_mode(struct net_device *dev)
3523{
David S. Millerb9e40852008-07-15 00:15:08 -07003524 netif_addr_lock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003525 __dev_set_rx_mode(dev);
David S. Millerb9e40852008-07-15 00:15:08 -07003526 netif_addr_unlock_bh(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527}
3528
Jiri Pirkof001fde2009-05-05 02:48:28 +00003529/* hw addresses list handling functions */
3530
Jiri Pirko31278e72009-06-17 01:12:19 +00003531static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3532 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003533{
3534 struct netdev_hw_addr *ha;
3535 int alloc_size;
3536
3537 if (addr_len > MAX_ADDR_LEN)
3538 return -EINVAL;
3539
Jiri Pirko31278e72009-06-17 01:12:19 +00003540 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003541 if (!memcmp(ha->addr, addr, addr_len) &&
3542 ha->type == addr_type) {
3543 ha->refcount++;
3544 return 0;
3545 }
3546 }
3547
3548
Jiri Pirkof001fde2009-05-05 02:48:28 +00003549 alloc_size = sizeof(*ha);
3550 if (alloc_size < L1_CACHE_BYTES)
3551 alloc_size = L1_CACHE_BYTES;
3552 ha = kmalloc(alloc_size, GFP_ATOMIC);
3553 if (!ha)
3554 return -ENOMEM;
3555 memcpy(ha->addr, addr, addr_len);
3556 ha->type = addr_type;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003557 ha->refcount = 1;
3558 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003559 list_add_tail_rcu(&ha->list, &list->list);
3560 list->count++;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003561 return 0;
3562}
3563
3564static void ha_rcu_free(struct rcu_head *head)
3565{
3566 struct netdev_hw_addr *ha;
3567
3568 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3569 kfree(ha);
3570}
3571
Jiri Pirko31278e72009-06-17 01:12:19 +00003572static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3573 int addr_len, unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003574{
3575 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003576
Jiri Pirko31278e72009-06-17 01:12:19 +00003577 list_for_each_entry(ha, &list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003578 if (!memcmp(ha->addr, addr, addr_len) &&
Jiri Pirkof001fde2009-05-05 02:48:28 +00003579 (ha->type == addr_type || !addr_type)) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003580 if (--ha->refcount)
3581 return 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003582 list_del_rcu(&ha->list);
3583 call_rcu(&ha->rcu_head, ha_rcu_free);
Jiri Pirko31278e72009-06-17 01:12:19 +00003584 list->count--;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003585 return 0;
3586 }
3587 }
3588 return -ENOENT;
3589}
3590
Jiri Pirko31278e72009-06-17 01:12:19 +00003591static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3592 struct netdev_hw_addr_list *from_list,
3593 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003594 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003595{
3596 int err;
3597 struct netdev_hw_addr *ha, *ha2;
3598 unsigned char type;
3599
Jiri Pirko31278e72009-06-17 01:12:19 +00003600 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003601 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003602 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003603 if (err)
3604 goto unroll;
3605 }
3606 return 0;
3607
3608unroll:
Jiri Pirko31278e72009-06-17 01:12:19 +00003609 list_for_each_entry(ha2, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003610 if (ha2 == ha)
3611 break;
3612 type = addr_type ? addr_type : ha2->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003613 __hw_addr_del(to_list, ha2->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003614 }
3615 return err;
3616}
3617
Jiri Pirko31278e72009-06-17 01:12:19 +00003618static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3619 struct netdev_hw_addr_list *from_list,
3620 int addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003621 unsigned char addr_type)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003622{
3623 struct netdev_hw_addr *ha;
3624 unsigned char type;
3625
Jiri Pirko31278e72009-06-17 01:12:19 +00003626 list_for_each_entry(ha, &from_list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003627 type = addr_type ? addr_type : ha->type;
Jiri Pirko31278e72009-06-17 01:12:19 +00003628		__hw_addr_del(to_list, ha->addr, addr_len, type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003629 }
3630}
3631
Jiri Pirko31278e72009-06-17 01:12:19 +00003632static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3633 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003634 int addr_len)
3635{
3636 int err = 0;
3637 struct netdev_hw_addr *ha, *tmp;
3638
Jiri Pirko31278e72009-06-17 01:12:19 +00003639 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003640 if (!ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003641 err = __hw_addr_add(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003642 addr_len, ha->type);
3643 if (err)
3644 break;
3645 ha->synced = true;
3646 ha->refcount++;
3647 } else if (ha->refcount == 1) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003648 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3649 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003650 }
3651 }
3652 return err;
3653}
3654
Jiri Pirko31278e72009-06-17 01:12:19 +00003655static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3656 struct netdev_hw_addr_list *from_list,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003657 int addr_len)
3658{
3659 struct netdev_hw_addr *ha, *tmp;
3660
Jiri Pirko31278e72009-06-17 01:12:19 +00003661 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00003662 if (ha->synced) {
Jiri Pirko31278e72009-06-17 01:12:19 +00003663 __hw_addr_del(to_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003664 addr_len, ha->type);
3665 ha->synced = false;
Jiri Pirko31278e72009-06-17 01:12:19 +00003666 __hw_addr_del(from_list, ha->addr,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003667 addr_len, ha->type);
3668 }
3669 }
3670}
3671
Jiri Pirko31278e72009-06-17 01:12:19 +00003672static void __hw_addr_flush(struct netdev_hw_addr_list *list)
Jiri Pirkof001fde2009-05-05 02:48:28 +00003673{
3674 struct netdev_hw_addr *ha, *tmp;
3675
Jiri Pirko31278e72009-06-17 01:12:19 +00003676 list_for_each_entry_safe(ha, tmp, &list->list, list) {
Jiri Pirkof001fde2009-05-05 02:48:28 +00003677 list_del_rcu(&ha->list);
3678 call_rcu(&ha->rcu_head, ha_rcu_free);
3679 }
Jiri Pirko31278e72009-06-17 01:12:19 +00003680 list->count = 0;
3681}
3682
3683static void __hw_addr_init(struct netdev_hw_addr_list *list)
3684{
3685 INIT_LIST_HEAD(&list->list);
3686 list->count = 0;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003687}
3688
3689/* Device addresses handling functions */
3690
3691static void dev_addr_flush(struct net_device *dev)
3692{
3693 /* rtnl_mutex must be held here */
3694
Jiri Pirko31278e72009-06-17 01:12:19 +00003695 __hw_addr_flush(&dev->dev_addrs);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003696 dev->dev_addr = NULL;
3697}
3698
3699static int dev_addr_init(struct net_device *dev)
3700{
3701 unsigned char addr[MAX_ADDR_LEN];
3702 struct netdev_hw_addr *ha;
3703 int err;
3704
3705 /* rtnl_mutex must be held here */
3706
Jiri Pirko31278e72009-06-17 01:12:19 +00003707 __hw_addr_init(&dev->dev_addrs);
Eric Dumazet0c279222009-06-08 03:49:24 +00003708 memset(addr, 0, sizeof(addr));
Jiri Pirko31278e72009-06-17 01:12:19 +00003709 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
Jiri Pirkof001fde2009-05-05 02:48:28 +00003710 NETDEV_HW_ADDR_T_LAN);
3711 if (!err) {
3712 /*
3713 * Get the first (previously created) address from the list
3714 * and set dev_addr pointer to this location.
3715 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003716 ha = list_first_entry(&dev->dev_addrs.list,
Jiri Pirkof001fde2009-05-05 02:48:28 +00003717 struct netdev_hw_addr, list);
3718 dev->dev_addr = ha->addr;
3719 }
3720 return err;
3721}
3722
3723/**
3724 * dev_addr_add - Add a device address
3725 * @dev: device
3726 * @addr: address to add
3727 * @addr_type: address type
3728 *
3729 * Add a device address to the device or increase the reference count if
3730 * it already exists.
3731 *
3732 * The caller must hold the rtnl_mutex.
3733 */
3734int dev_addr_add(struct net_device *dev, unsigned char *addr,
3735 unsigned char addr_type)
3736{
3737 int err;
3738
3739 ASSERT_RTNL();
3740
Jiri Pirko31278e72009-06-17 01:12:19 +00003741 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003742 if (!err)
3743 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3744 return err;
3745}
3746EXPORT_SYMBOL(dev_addr_add);
3747
3748/**
3749 * dev_addr_del - Release a device address.
3750 * @dev: device
3751 * @addr: address to delete
3752 * @addr_type: address type
3753 *
3754 * Release reference to a device address and remove it from the device
3755 * if the reference count drops to zero.
3756 *
3757 * The caller must hold the rtnl_mutex.
3758 */
3759int dev_addr_del(struct net_device *dev, unsigned char *addr,
3760 unsigned char addr_type)
3761{
3762 int err;
Jiri Pirkoccffad252009-05-22 23:22:17 +00003763 struct netdev_hw_addr *ha;
Jiri Pirkof001fde2009-05-05 02:48:28 +00003764
3765 ASSERT_RTNL();
3766
Jiri Pirkoccffad252009-05-22 23:22:17 +00003767 /*
3768	 * We cannot remove the first address from the list because
3769 * dev->dev_addr points to that.
3770 */
Jiri Pirko31278e72009-06-17 01:12:19 +00003771 ha = list_first_entry(&dev->dev_addrs.list,
3772 struct netdev_hw_addr, list);
Jiri Pirkoccffad252009-05-22 23:22:17 +00003773 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3774 return -ENOENT;
3775
Jiri Pirko31278e72009-06-17 01:12:19 +00003776 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003777 addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003778 if (!err)
3779 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3780 return err;
3781}
3782EXPORT_SYMBOL(dev_addr_del);
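/*
 * Illustrative sketch (not part of this file): reference-counted device
 * addresses.  An FCoE-style user could publish a second hardware
 * address under its own type and withdraw it later; san_addr is a
 * hypothetical buffer of dev->addr_len bytes, and each add is balanced
 * by a matching del.  Both calls require the RTNL.
 *
 *	static int my_publish_san_addr(struct net_device *dev,
 *				       unsigned char *san_addr)
 *	{
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_addr_add(dev, san_addr, NETDEV_HW_ADDR_T_SAN);
 *		rtnl_unlock();
 *		return err;
 *	}
 *
 *	static void my_withdraw_san_addr(struct net_device *dev,
 *					 unsigned char *san_addr)
 *	{
 *		rtnl_lock();
 *		dev_addr_del(dev, san_addr, NETDEV_HW_ADDR_T_SAN);
 *		rtnl_unlock();
 *	}
 */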
3783
3784/**
3785 * dev_addr_add_multiple - Add device addresses from another device
3786 * @to_dev: device to which addresses will be added
3787 * @from_dev: device from which addresses will be added
3788 * @addr_type: address type - 0 means type will be used from from_dev
3789 *
3790 * Add device addresses of the one device to another.
3791 * Add the device addresses of one device to another.
3792 *
3793 */
3794int dev_addr_add_multiple(struct net_device *to_dev,
3795 struct net_device *from_dev,
3796 unsigned char addr_type)
3797{
3798 int err;
3799
3800 ASSERT_RTNL();
3801
3802 if (from_dev->addr_len != to_dev->addr_len)
3803 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003804 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003805 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003806 if (!err)
3807 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3808 return err;
3809}
3810EXPORT_SYMBOL(dev_addr_add_multiple);
3811
3812/**
3813 * dev_addr_del_multiple - Delete device addresses by another device
3814 * @to_dev: device where the addresses will be deleted
3815 * @from_dev: device supplying the addresses to be deleted
3816 * @addr_type: address type - 0 means type will be used from from_dev
3817 *
3818 * Deletes the addresses in the to device that are listed in the from device.
3819 *
3820 * The caller must hold the rtnl_mutex.
3821 */
3822int dev_addr_del_multiple(struct net_device *to_dev,
3823 struct net_device *from_dev,
3824 unsigned char addr_type)
3825{
3826 ASSERT_RTNL();
3827
3828 if (from_dev->addr_len != to_dev->addr_len)
3829 return -EINVAL;
Jiri Pirko31278e72009-06-17 01:12:19 +00003830 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
Jiri Pirkoccffad252009-05-22 23:22:17 +00003831 to_dev->addr_len, addr_type);
Jiri Pirkof001fde2009-05-05 02:48:28 +00003832 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
3833 return 0;
3834}
3835EXPORT_SYMBOL(dev_addr_del_multiple);
3836
Jiri Pirko31278e72009-06-17 01:12:19 +00003837/* multicast addresses handling functions */
Jiri Pirkof001fde2009-05-05 02:48:28 +00003838
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003839int __dev_addr_delete(struct dev_addr_list **list, int *count,
3840 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003841{
3842 struct dev_addr_list *da;
3843
3844 for (; (da = *list) != NULL; list = &da->next) {
3845 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3846 alen == da->da_addrlen) {
3847 if (glbl) {
3848 int old_glbl = da->da_gusers;
3849 da->da_gusers = 0;
3850 if (old_glbl == 0)
3851 break;
3852 }
3853 if (--da->da_users)
3854 return 0;
3855
3856 *list = da->next;
3857 kfree(da);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003858 (*count)--;
Patrick McHardybf742482007-06-27 01:26:19 -07003859 return 0;
3860 }
3861 }
3862 return -ENOENT;
3863}
3864
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003865int __dev_addr_add(struct dev_addr_list **list, int *count,
3866 void *addr, int alen, int glbl)
Patrick McHardybf742482007-06-27 01:26:19 -07003867{
3868 struct dev_addr_list *da;
3869
3870 for (da = *list; da != NULL; da = da->next) {
3871 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3872 da->da_addrlen == alen) {
3873 if (glbl) {
3874 int old_glbl = da->da_gusers;
3875 da->da_gusers = 1;
3876 if (old_glbl)
3877 return 0;
3878 }
3879 da->da_users++;
3880 return 0;
3881 }
3882 }
3883
Jorge Boncompte [DTI2]12aa3432008-02-19 14:17:04 -08003884 da = kzalloc(sizeof(*da), GFP_ATOMIC);
Patrick McHardybf742482007-06-27 01:26:19 -07003885 if (da == NULL)
3886 return -ENOMEM;
3887 memcpy(da->da_addr, addr, alen);
3888 da->da_addrlen = alen;
3889 da->da_users = 1;
3890 da->da_gusers = glbl ? 1 : 0;
3891 da->next = *list;
3892 *list = da;
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003893 (*count)++;
Patrick McHardybf742482007-06-27 01:26:19 -07003894 return 0;
3895}
3896
Patrick McHardy4417da62007-06-27 01:28:10 -07003897/**
3898 * dev_unicast_delete - Release secondary unicast address.
3899 * @dev: device
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003900 * @addr: address to delete
Patrick McHardy4417da62007-06-27 01:28:10 -07003901 *
3902 * Release reference to a secondary unicast address and remove it
Randy Dunlap0ed72ec2007-07-26 00:03:29 -07003903 * from the device if the reference count drops to zero.
Patrick McHardy4417da62007-06-27 01:28:10 -07003904 *
3905 * The caller must hold the rtnl_mutex.
3906 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003907int dev_unicast_delete(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003908{
3909 int err;
3910
3911 ASSERT_RTNL();
3912
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003913 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00003914 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
3915 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003916 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003917 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003918 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003919 return err;
3920}
3921EXPORT_SYMBOL(dev_unicast_delete);
3922
3923/**
3924 * dev_unicast_add - add a secondary unicast address
3925 * @dev: device
Wang Chen5dbaec52008-06-27 19:35:16 -07003926 * @addr: address to add
Patrick McHardy4417da62007-06-27 01:28:10 -07003927 *
3928 * Add a secondary unicast address to the device or increase
3929 * the reference count if it already exists.
3930 *
3931 * The caller must hold the rtnl_mutex.
3932 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00003933int dev_unicast_add(struct net_device *dev, void *addr)
Patrick McHardy4417da62007-06-27 01:28:10 -07003934{
3935 int err;
3936
3937 ASSERT_RTNL();
3938
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003939 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00003940 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
3941 NETDEV_HW_ADDR_T_UNICAST);
Patrick McHardy61cbc2f2007-06-30 13:35:52 -07003942 if (!err)
Patrick McHardy4417da62007-06-27 01:28:10 -07003943 __dev_set_rx_mode(dev);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00003944 netif_addr_unlock_bh(dev);
Patrick McHardy4417da62007-06-27 01:28:10 -07003945 return err;
3946}
3947EXPORT_SYMBOL(dev_unicast_add);
3948
Chris Leeche83a2ea2008-01-31 16:53:23 -08003949int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3950 struct dev_addr_list **from, int *from_count)
3951{
3952 struct dev_addr_list *da, *next;
3953 int err = 0;
3954
3955 da = *from;
3956 while (da != NULL) {
3957 next = da->next;
3958 if (!da->da_synced) {
3959 err = __dev_addr_add(to, to_count,
3960 da->da_addr, da->da_addrlen, 0);
3961 if (err < 0)
3962 break;
3963 da->da_synced = 1;
3964 da->da_users++;
3965 } else if (da->da_users == 1) {
3966 __dev_addr_delete(to, to_count,
3967 da->da_addr, da->da_addrlen, 0);
3968 __dev_addr_delete(from, from_count,
3969 da->da_addr, da->da_addrlen, 0);
3970 }
3971 da = next;
3972 }
3973 return err;
3974}
Johannes Bergc4029082009-06-17 17:43:30 +02003975EXPORT_SYMBOL_GPL(__dev_addr_sync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003976
3977void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3978 struct dev_addr_list **from, int *from_count)
3979{
3980 struct dev_addr_list *da, *next;
3981
3982 da = *from;
3983 while (da != NULL) {
3984 next = da->next;
3985 if (da->da_synced) {
3986 __dev_addr_delete(to, to_count,
3987 da->da_addr, da->da_addrlen, 0);
3988 da->da_synced = 0;
3989 __dev_addr_delete(from, from_count,
3990 da->da_addr, da->da_addrlen, 0);
3991 }
3992 da = next;
3993 }
3994}
Johannes Bergc4029082009-06-17 17:43:30 +02003995EXPORT_SYMBOL_GPL(__dev_addr_unsync);
Chris Leeche83a2ea2008-01-31 16:53:23 -08003996
3997/**
3998 * dev_unicast_sync - Synchronize device's unicast list to another device
3999 * @to: destination device
4000 * @from: source device
4001 *
4002 * Add newly added addresses to the destination device and release
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004003 * addresses that have no users left. The source device must be
4004 *	locked by netif_addr_lock_bh.
Chris Leeche83a2ea2008-01-31 16:53:23 -08004005 *
4006 * This function is intended to be called from the dev->set_rx_mode
4007 * function of layered software devices.
4008 */
4009int dev_unicast_sync(struct net_device *to, struct net_device *from)
4010{
4011 int err = 0;
4012
Jiri Pirkoccffad252009-05-22 23:22:17 +00004013 if (to->addr_len != from->addr_len)
4014 return -EINVAL;
4015
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004016 netif_addr_lock_bh(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004017 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004018 if (!err)
4019 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004020 netif_addr_unlock_bh(to);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004021 return err;
4022}
4023EXPORT_SYMBOL(dev_unicast_sync);
4024
4025/**
Randy Dunlapbc2cda12008-02-13 15:03:25 -08004026 * dev_unicast_unsync - Remove synchronized addresses from the destination device
Chris Leeche83a2ea2008-01-31 16:53:23 -08004027 * @to: destination device
4028 * @from: source device
4029 *
4030 * Remove all addresses that were added to the destination device by
4031 * dev_unicast_sync(). This function is intended to be called from the
4032 * dev->stop function of layered software devices.
4033 */
4034void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4035{
Jiri Pirkoccffad252009-05-22 23:22:17 +00004036 if (to->addr_len != from->addr_len)
4037 return;
4038
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004039 netif_addr_lock_bh(from);
4040 netif_addr_lock(to);
Jiri Pirko31278e72009-06-17 01:12:19 +00004041 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004042 __dev_set_rx_mode(to);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004043 netif_addr_unlock(to);
4044 netif_addr_unlock_bh(from);
Chris Leeche83a2ea2008-01-31 16:53:23 -08004045}
4046EXPORT_SYMBOL(dev_unicast_unsync);
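/*
 * Illustrative sketch (not part of this file): how a VLAN-like stacked
 * device keeps its lower device's unicast filter in step.  The sync
 * call sits in the upper device's ndo_set_rx_mode, the unsync call in
 * its ndo_stop; my_lower_dev() is a hypothetical accessor.
 *
 *	static void my_set_rx_mode(struct net_device *dev)
 *	{
 *		dev_unicast_sync(my_lower_dev(dev), dev);
 *	}
 *
 *	static int my_stop(struct net_device *dev)
 *	{
 *		dev_unicast_unsync(my_lower_dev(dev), dev);
 *		return 0;
 *	}
 */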
4047
Jiri Pirkoccffad252009-05-22 23:22:17 +00004048static void dev_unicast_flush(struct net_device *dev)
4049{
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004050 netif_addr_lock_bh(dev);
Jiri Pirko31278e72009-06-17 01:12:19 +00004051 __hw_addr_flush(&dev->uc);
Jiri Pirkoa6ac65d2009-07-30 01:06:12 +00004052 netif_addr_unlock_bh(dev);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004053}
4054
4055static void dev_unicast_init(struct net_device *dev)
4056{
Jiri Pirko31278e72009-06-17 01:12:19 +00004057 __hw_addr_init(&dev->uc);
Jiri Pirkoccffad252009-05-22 23:22:17 +00004058}
4059
4060
Denis Cheng12972622007-07-18 02:12:56 -07004061static void __dev_addr_discard(struct dev_addr_list **list)
4062{
4063 struct dev_addr_list *tmp;
4064
4065 while (*list != NULL) {
4066 tmp = *list;
4067 *list = tmp->next;
4068 if (tmp->da_users > tmp->da_gusers)
4069 printk("__dev_addr_discard: address leakage! "
4070 "da_users=%d\n", tmp->da_users);
4071 kfree(tmp);
4072 }
4073}
4074
Denis Cheng26cc2522007-07-18 02:12:03 -07004075static void dev_addr_discard(struct net_device *dev)
Patrick McHardy4417da62007-06-27 01:28:10 -07004076{
David S. Millerb9e40852008-07-15 00:15:08 -07004077 netif_addr_lock_bh(dev);
Denis Cheng26cc2522007-07-18 02:12:03 -07004078
Denis Cheng456ad752007-07-18 02:10:54 -07004079 __dev_addr_discard(&dev->mc_list);
4080 dev->mc_count = 0;
Denis Cheng26cc2522007-07-18 02:12:03 -07004081
David S. Millerb9e40852008-07-15 00:15:08 -07004082 netif_addr_unlock_bh(dev);
Denis Cheng456ad752007-07-18 02:10:54 -07004083}
4084
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004085/**
4086 * dev_get_flags - get flags reported to userspace
4087 * @dev: device
4088 *
4089 * Get the combination of flag bits exported through APIs to userspace.
4090 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004091unsigned dev_get_flags(const struct net_device *dev)
4092{
4093 unsigned flags;
4094
4095 flags = (dev->flags & ~(IFF_PROMISC |
4096 IFF_ALLMULTI |
Stefan Rompfb00055a2006-03-20 17:09:11 -08004097 IFF_RUNNING |
4098 IFF_LOWER_UP |
4099 IFF_DORMANT)) |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100 (dev->gflags & (IFF_PROMISC |
4101 IFF_ALLMULTI));
4102
Stefan Rompfb00055a2006-03-20 17:09:11 -08004103 if (netif_running(dev)) {
4104 if (netif_oper_up(dev))
4105 flags |= IFF_RUNNING;
4106 if (netif_carrier_ok(dev))
4107 flags |= IFF_LOWER_UP;
4108 if (netif_dormant(dev))
4109 flags |= IFF_DORMANT;
4110 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111
4112 return flags;
4113}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004114EXPORT_SYMBOL(dev_get_flags);
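/*
 * Example (sketch, not part of the original file): reading the userspace
 * view of an interface's flags.  "dev" is assumed to be a valid
 * net_device pointer held by the caller.
 *
 *	unsigned flags = dev_get_flags(dev);
 *
 *	if ((flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
 *		printk(KERN_DEBUG "%s is up and operational\n", dev->name);
 */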
Linus Torvalds1da177e2005-04-16 15:20:36 -07004115
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004116/**
4117 * dev_change_flags - change device settings
4118 * @dev: device
4119 * @flags: device state flags
4120 *
4121 * Change settings on the device based on the given state flags. The flags are
4122 * in the userspace exported format.
4123 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004124int dev_change_flags(struct net_device *dev, unsigned flags)
4125{
Thomas Graf7c355f52007-06-05 16:03:03 -07004126 int ret, changes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127 int old_flags = dev->flags;
4128
Patrick McHardy24023452007-07-14 18:51:31 -07004129 ASSERT_RTNL();
4130
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131 /*
4132 * Set the flags on our device.
4133 */
4134
4135 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4136 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4137 IFF_AUTOMEDIA)) |
4138 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4139 IFF_ALLMULTI));
4140
4141 /*
4142 * Load in the correct multicast list now the flags have changed.
4143 */
4144
Patrick McHardyb6c40d62008-10-07 15:26:48 -07004145 if ((old_flags ^ flags) & IFF_MULTICAST)
4146 dev_change_rx_flags(dev, IFF_MULTICAST);
Patrick McHardy24023452007-07-14 18:51:31 -07004147
Patrick McHardy4417da62007-06-27 01:28:10 -07004148 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004149
4150 /*
4151 * Have we downed the interface? We handle IFF_UP ourselves
4152 * according to user attempts to set it, rather than blindly
4153 * setting it.
4154 */
4155
4156 ret = 0;
4157 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4158 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
4159
4160 if (!ret)
Patrick McHardy4417da62007-06-27 01:28:10 -07004161 dev_set_rx_mode(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162 }
4163
4164 if (dev->flags & IFF_UP &&
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004165 ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166 IFF_VOLATILE)))
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004167 call_netdevice_notifiers(NETDEV_CHANGE, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168
4169 if ((flags ^ dev->gflags) & IFF_PROMISC) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004170 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4171
Linus Torvalds1da177e2005-04-16 15:20:36 -07004172 dev->gflags ^= IFF_PROMISC;
4173 dev_set_promiscuity(dev, inc);
4174 }
4175
4176 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4177 is important. Some (broken) drivers set IFF_PROMISC when
4178 IFF_ALLMULTI is requested, without asking us and without reporting it.
4179 */
4180 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004181 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4182
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183 dev->gflags ^= IFF_ALLMULTI;
4184 dev_set_allmulti(dev, inc);
4185 }
4186
Thomas Graf7c355f52007-06-05 16:03:03 -07004187 /* Exclude state transition flags, already notified */
4188 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
4189 if (changes)
4190 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004191
4192 return ret;
4193}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004194EXPORT_SYMBOL(dev_change_flags);
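/*
 * Example (sketch, not part of the original file): bringing an interface
 * up from kernel code.  dev_change_flags() expects the userspace flag
 * format and must be called with the rtnl semaphore held.
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */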
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004196/**
4197 * dev_set_mtu - Change maximum transfer unit
4198 * @dev: device
4199 * @new_mtu: new transfer unit
4200 *
4201 * Change the maximum transfer size of the network device.
4202 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004203int dev_set_mtu(struct net_device *dev, int new_mtu)
4204{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004205 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004206 int err;
4207
4208 if (new_mtu == dev->mtu)
4209 return 0;
4210
4211 /* MTU must be positive. */
4212 if (new_mtu < 0)
4213 return -EINVAL;
4214
4215 if (!netif_device_present(dev))
4216 return -ENODEV;
4217
4218 err = 0;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004219 if (ops->ndo_change_mtu)
4220 err = ops->ndo_change_mtu(dev, new_mtu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221 else
4222 dev->mtu = new_mtu;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004223
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224 if (!err && dev->flags & IFF_UP)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004225 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226 return err;
4227}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004228EXPORT_SYMBOL(dev_set_mtu);
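/*
 * Example (sketch, not part of the original file): switching a device to
 * a jumbo MTU under the rtnl lock, mirroring what the SIOCSIFMTU ioctl
 * path below does.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 *	if (err)
 *		printk(KERN_WARNING "%s: MTU change failed: %d\n",
 *		       dev->name, err);
 */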
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07004230/**
4231 * dev_set_mac_address - Change Media Access Control Address
4232 * @dev: device
4233 * @sa: new address
4234 *
4235 * Change the hardware (MAC) address of the device
4236 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4238{
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004239 const struct net_device_ops *ops = dev->netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004240 int err;
4241
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004242 if (!ops->ndo_set_mac_address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004243 return -EOPNOTSUPP;
4244 if (sa->sa_family != dev->type)
4245 return -EINVAL;
4246 if (!netif_device_present(dev))
4247 return -ENODEV;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004248 err = ops->ndo_set_mac_address(dev, sa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249 if (!err)
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004250 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251 return err;
4252}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004253EXPORT_SYMBOL(dev_set_mac_address);
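/*
 * Example (sketch, not part of the original file): setting a new hardware
 * address.  The sockaddr family must match dev->type, just as the
 * SIOCSIFHWADDR handler below requires.  "new_mac" is a hypothetical
 * buffer of dev->addr_len bytes.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */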
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254
4255/*
Jeff Garzik14e3e072007-10-08 00:06:32 -07004256 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257 */
Jeff Garzik14e3e072007-10-08 00:06:32 -07004258static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259{
4260 int err;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004261 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004262
4263 if (!dev)
4264 return -ENODEV;
4265
4266 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004267 case SIOCGIFFLAGS: /* Get interface flags */
4268 ifr->ifr_flags = (short) dev_get_flags(dev);
4269 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004271 case SIOCGIFMETRIC: /* Get the metric on the interface
4272 (currently unused) */
4273 ifr->ifr_metric = 0;
4274 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004276 case SIOCGIFMTU: /* Get the MTU of a device */
4277 ifr->ifr_mtu = dev->mtu;
4278 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004280 case SIOCGIFHWADDR:
4281 if (!dev->addr_len)
4282 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4283 else
4284 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4285 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4286 ifr->ifr_hwaddr.sa_family = dev->type;
4287 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004289 case SIOCGIFSLAVE:
4290 err = -EINVAL;
4291 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004292
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004293 case SIOCGIFMAP:
4294 ifr->ifr_map.mem_start = dev->mem_start;
4295 ifr->ifr_map.mem_end = dev->mem_end;
4296 ifr->ifr_map.base_addr = dev->base_addr;
4297 ifr->ifr_map.irq = dev->irq;
4298 ifr->ifr_map.dma = dev->dma;
4299 ifr->ifr_map.port = dev->if_port;
4300 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004301
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004302 case SIOCGIFINDEX:
4303 ifr->ifr_ifindex = dev->ifindex;
4304 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004305
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004306 case SIOCGIFTXQLEN:
4307 ifr->ifr_qlen = dev->tx_queue_len;
4308 return 0;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004309
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004310 default:
4311 /* dev_ioctl() should ensure this case
4312 * is never reached
4313 */
4314 WARN_ON(1);
4315 err = -EINVAL;
4316 break;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004317
4318 }
4319 return err;
4320}
4321
4322/*
4323 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4324 */
4325static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4326{
4327 int err;
4328 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004329 const struct net_device_ops *ops;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004330
4331 if (!dev)
4332 return -ENODEV;
4333
Jarek Poplawski5f2f6da2008-12-22 19:35:28 -08004334 ops = dev->netdev_ops;
4335
Jeff Garzik14e3e072007-10-08 00:06:32 -07004336 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004337 case SIOCSIFFLAGS: /* Set interface flags */
4338 return dev_change_flags(dev, ifr->ifr_flags);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004339
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004340 case SIOCSIFMETRIC: /* Set the metric on the interface
4341 (currently unused) */
4342 return -EOPNOTSUPP;
Jeff Garzik14e3e072007-10-08 00:06:32 -07004343
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004344 case SIOCSIFMTU: /* Set the MTU of a device */
4345 return dev_set_mtu(dev, ifr->ifr_mtu);
Jeff Garzik14e3e072007-10-08 00:06:32 -07004346
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004347 case SIOCSIFHWADDR:
4348 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004349
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004350 case SIOCSIFHWBROADCAST:
4351 if (ifr->ifr_hwaddr.sa_family != dev->type)
4352 return -EINVAL;
4353 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4354 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4355 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4356 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004358 case SIOCSIFMAP:
4359 if (ops->ndo_set_config) {
4360 if (!netif_device_present(dev))
4361 return -ENODEV;
4362 return ops->ndo_set_config(dev, &ifr->ifr_map);
4363 }
4364 return -EOPNOTSUPP;
4365
4366 case SIOCADDMULTI:
4367 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4368 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4369 return -EINVAL;
4370 if (!netif_device_present(dev))
4371 return -ENODEV;
4372 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4373 dev->addr_len, 1);
4374
4375 case SIOCDELMULTI:
4376 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4377 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4378 return -EINVAL;
4379 if (!netif_device_present(dev))
4380 return -ENODEV;
4381 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4382 dev->addr_len, 1);
4383
4384 case SIOCSIFTXQLEN:
4385 if (ifr->ifr_qlen < 0)
4386 return -EINVAL;
4387 dev->tx_queue_len = ifr->ifr_qlen;
4388 return 0;
4389
4390 case SIOCSIFNAME:
4391 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4392 return dev_change_name(dev, ifr->ifr_newname);
4393
4394 /*
4395 * Unknown or private ioctl
4396 */
4397 default:
4398 if ((cmd >= SIOCDEVPRIVATE &&
4399 cmd <= SIOCDEVPRIVATE + 15) ||
4400 cmd == SIOCBONDENSLAVE ||
4401 cmd == SIOCBONDRELEASE ||
4402 cmd == SIOCBONDSETHWADDR ||
4403 cmd == SIOCBONDSLAVEINFOQUERY ||
4404 cmd == SIOCBONDINFOQUERY ||
4405 cmd == SIOCBONDCHANGEACTIVE ||
4406 cmd == SIOCGMIIPHY ||
4407 cmd == SIOCGMIIREG ||
4408 cmd == SIOCSMIIREG ||
4409 cmd == SIOCBRADDIF ||
4410 cmd == SIOCBRDELIF ||
4411 cmd == SIOCSHWTSTAMP ||
4412 cmd == SIOCWANDEV) {
4413 err = -EOPNOTSUPP;
4414 if (ops->ndo_do_ioctl) {
4415 if (netif_device_present(dev))
4416 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4417 else
4418 err = -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004419 }
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004420 } else
4421 err = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422
4423 }
4424 return err;
4425}
4426
4427/*
4428 * This function handles all "interface"-type I/O control requests. The actual
4429 * 'doing' part of this is dev_ifsioc above.
4430 */
4431
4432/**
4433 * dev_ioctl - network device ioctl
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004434 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435 * @cmd: command to issue
4436 * @arg: pointer to a struct ifreq in user space
4437 *
4438 * Issue ioctl functions to devices. This is normally called by the
4439 * user space syscall interfaces but can sometimes be useful for
4440 * other purposes. The return value is the return from the syscall if
4441 * positive or a negative errno code on error.
4442 */
4443
Eric W. Biederman881d9662007-09-17 11:56:21 -07004444int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445{
4446 struct ifreq ifr;
4447 int ret;
4448 char *colon;
4449
4450 /* One special case: SIOCGIFCONF takes ifconf argument
4451 and requires shared lock, because it sleeps writing
4452 to user space.
4453 */
4454
4455 if (cmd == SIOCGIFCONF) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004456 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004457 ret = dev_ifconf(net, (char __user *) arg);
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004458 rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459 return ret;
4460 }
4461 if (cmd == SIOCGIFNAME)
Eric W. Biederman881d9662007-09-17 11:56:21 -07004462 return dev_ifname(net, (struct ifreq __user *)arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463
4464 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4465 return -EFAULT;
4466
4467 ifr.ifr_name[IFNAMSIZ-1] = 0;
4468
4469 colon = strchr(ifr.ifr_name, ':');
4470 if (colon)
4471 *colon = 0;
4472
4473 /*
4474 * See which interface the caller is talking about.
4475 */
4476
4477 switch (cmd) {
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004478 /*
4479 * These ioctl calls:
4480 * - can be done by all.
4481 * - atomic and do not require locking.
4482 * - return a value
4483 */
4484 case SIOCGIFFLAGS:
4485 case SIOCGIFMETRIC:
4486 case SIOCGIFMTU:
4487 case SIOCGIFHWADDR:
4488 case SIOCGIFSLAVE:
4489 case SIOCGIFMAP:
4490 case SIOCGIFINDEX:
4491 case SIOCGIFTXQLEN:
4492 dev_load(net, ifr.ifr_name);
4493 read_lock(&dev_base_lock);
4494 ret = dev_ifsioc_locked(net, &ifr, cmd);
4495 read_unlock(&dev_base_lock);
4496 if (!ret) {
4497 if (colon)
4498 *colon = ':';
4499 if (copy_to_user(arg, &ifr,
4500 sizeof(struct ifreq)))
4501 ret = -EFAULT;
4502 }
4503 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004505 case SIOCETHTOOL:
4506 dev_load(net, ifr.ifr_name);
4507 rtnl_lock();
4508 ret = dev_ethtool(net, &ifr);
4509 rtnl_unlock();
4510 if (!ret) {
4511 if (colon)
4512 *colon = ':';
4513 if (copy_to_user(arg, &ifr,
4514 sizeof(struct ifreq)))
4515 ret = -EFAULT;
4516 }
4517 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004519 /*
4520 * These ioctl calls:
4521 * - require superuser power.
4522 * - require strict serialization.
4523 * - return a value
4524 */
4525 case SIOCGMIIPHY:
4526 case SIOCGMIIREG:
4527 case SIOCSIFNAME:
4528 if (!capable(CAP_NET_ADMIN))
4529 return -EPERM;
4530 dev_load(net, ifr.ifr_name);
4531 rtnl_lock();
4532 ret = dev_ifsioc(net, &ifr, cmd);
4533 rtnl_unlock();
4534 if (!ret) {
4535 if (colon)
4536 *colon = ':';
4537 if (copy_to_user(arg, &ifr,
4538 sizeof(struct ifreq)))
4539 ret = -EFAULT;
4540 }
4541 return ret;
4542
4543 /*
4544 * These ioctl calls:
4545 * - require superuser power.
4546 * - require strict serialization.
4547 * - do not return a value
4548 */
4549 case SIOCSIFFLAGS:
4550 case SIOCSIFMETRIC:
4551 case SIOCSIFMTU:
4552 case SIOCSIFMAP:
4553 case SIOCSIFHWADDR:
4554 case SIOCSIFSLAVE:
4555 case SIOCADDMULTI:
4556 case SIOCDELMULTI:
4557 case SIOCSIFHWBROADCAST:
4558 case SIOCSIFTXQLEN:
4559 case SIOCSMIIREG:
4560 case SIOCBONDENSLAVE:
4561 case SIOCBONDRELEASE:
4562 case SIOCBONDSETHWADDR:
4563 case SIOCBONDCHANGEACTIVE:
4564 case SIOCBRADDIF:
4565 case SIOCBRDELIF:
4566 case SIOCSHWTSTAMP:
4567 if (!capable(CAP_NET_ADMIN))
4568 return -EPERM;
4569 /* fall through */
4570 case SIOCBONDSLAVEINFOQUERY:
4571 case SIOCBONDINFOQUERY:
4572 dev_load(net, ifr.ifr_name);
4573 rtnl_lock();
4574 ret = dev_ifsioc(net, &ifr, cmd);
4575 rtnl_unlock();
4576 return ret;
4577
4578 case SIOCGIFMEM:
4579 /* Get the per device memory space. We can add this but
4580 * currently do not support it */
4581 case SIOCSIFMEM:
4582 /* Set the per device memory buffer space.
4583 * Not applicable in our case */
4584 case SIOCSIFLINK:
4585 return -EINVAL;
4586
4587 /*
4588 * Unknown or private ioctl.
4589 */
4590 default:
4591 if (cmd == SIOCWANDEV ||
4592 (cmd >= SIOCDEVPRIVATE &&
4593 cmd <= SIOCDEVPRIVATE + 15)) {
Eric W. Biederman881d9662007-09-17 11:56:21 -07004594 dev_load(net, ifr.ifr_name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004595 rtnl_lock();
Eric W. Biederman881d9662007-09-17 11:56:21 -07004596 ret = dev_ifsioc(net, &ifr, cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597 rtnl_unlock();
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004598 if (!ret && copy_to_user(arg, &ifr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004599 sizeof(struct ifreq)))
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004600 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601 return ret;
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004602 }
4603 /* Take care of Wireless Extensions */
4604 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4605 return wext_handle_ioctl(net, &ifr, cmd, arg);
4606 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004607 }
4608}
4609
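/*
 * Userspace view (illustrative sketch, not part of the original file):
 * the request format that dev_ioctl() ultimately services.  Any AF_INET
 * datagram socket works as the handle; "eth0" is just an example name.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("eth0 mtu is %d\n", ifr.ifr_mtu);
 */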
4610
4611/**
4612 * dev_new_index - allocate an ifindex
Randy Dunlapc4ea43c2007-10-12 21:17:49 -07004613 * @net: the applicable net namespace
Linus Torvalds1da177e2005-04-16 15:20:36 -07004614 *
4615 * Returns a suitable unique value for a new device interface
4616 * number. The caller must hold the rtnl semaphore or the
4617 * dev_base_lock to be sure it remains unique.
4618 */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004619static int dev_new_index(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620{
4621 static int ifindex;
4622 for (;;) {
4623 if (++ifindex <= 0)
4624 ifindex = 1;
Eric W. Biederman881d9662007-09-17 11:56:21 -07004625 if (!__dev_get_by_index(net, ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004626 return ifindex;
4627 }
4628}
4629
Linus Torvalds1da177e2005-04-16 15:20:36 -07004630/* Delayed registration/unregistration */
Denis Cheng3b5b34f2007-12-07 00:49:17 -08004631static LIST_HEAD(net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004632
Stephen Hemminger6f05f622007-03-08 20:46:03 -08004633static void net_set_todo(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004634{
Linus Torvalds1da177e2005-04-16 15:20:36 -07004635 list_add_tail(&dev->todo_list, &net_todo_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636}
4637
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004638static void rollback_registered(struct net_device *dev)
4639{
4640 BUG_ON(dev_boot_phase);
4641 ASSERT_RTNL();
4642
4643 /* Some devices call without registering for initialization unwind. */
4644 if (dev->reg_state == NETREG_UNINITIALIZED) {
4645 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4646 "was registered\n", dev->name, dev);
4647
4648 WARN_ON(1);
4649 return;
4650 }
4651
4652 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4653
4654 /* If device is running, close it first. */
4655 dev_close(dev);
4656
4657 /* And unlink it from device chain. */
4658 unlist_netdevice(dev);
4659
4660 dev->reg_state = NETREG_UNREGISTERING;
4661
4662 synchronize_net();
4663
4664 /* Shutdown queueing discipline. */
4665 dev_shutdown(dev);
4666
4667
4668 /* Notify protocols that we are about to destroy
4669 this device. They should clean all the things.
4670 */
4671 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4672
4673 /*
4674 * Flush the unicast and multicast chains
4675 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00004676 dev_unicast_flush(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004677 dev_addr_discard(dev);
4678
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004679 if (dev->netdev_ops->ndo_uninit)
4680 dev->netdev_ops->ndo_uninit(dev);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004681
4682 /* Notifier chain MUST detach us from master device. */
Ilpo Järvinen547b7922008-07-25 21:43:18 -07004683 WARN_ON(dev->master);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004684
4685 /* Remove entries from kobject tree */
4686 netdev_unregister_kobject(dev);
4687
4688 synchronize_net();
4689
4690 dev_put(dev);
4691}
4692
David S. Millere8a04642008-07-17 00:34:19 -07004693static void __netdev_init_queue_locks_one(struct net_device *dev,
4694 struct netdev_queue *dev_queue,
4695 void *_unused)
David S. Millerc773e842008-07-08 23:13:53 -07004696{
4697 spin_lock_init(&dev_queue->_xmit_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004698 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
David S. Millerc773e842008-07-08 23:13:53 -07004699 dev_queue->xmit_lock_owner = -1;
4700}
4701
4702static void netdev_init_queue_locks(struct net_device *dev)
4703{
David S. Millere8a04642008-07-17 00:34:19 -07004704 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4705 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
David S. Millerc773e842008-07-08 23:13:53 -07004706}
4707
Herbert Xub63365a2008-10-23 01:11:29 -07004708unsigned long netdev_fix_features(unsigned long features, const char *name)
4709{
4710 /* Fix illegal SG+CSUM combinations. */
4711 if ((features & NETIF_F_SG) &&
4712 !(features & NETIF_F_ALL_CSUM)) {
4713 if (name)
4714 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4715 "checksum feature.\n", name);
4716 features &= ~NETIF_F_SG;
4717 }
4718
4719 /* TSO requires that SG is present as well. */
4720 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4721 if (name)
4722 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4723 "SG feature.\n", name);
4724 features &= ~NETIF_F_TSO;
4725 }
4726
4727 if (features & NETIF_F_UFO) {
4728 if (!(features & NETIF_F_GEN_CSUM)) {
4729 if (name)
4730 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4731 "since no NETIF_F_HW_CSUM feature.\n",
4732 name);
4733 features &= ~NETIF_F_UFO;
4734 }
4735
4736 if (!(features & NETIF_F_SG)) {
4737 if (name)
4738 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4739 "since no NETIF_F_SG feature.\n", name);
4740 features &= ~NETIF_F_UFO;
4741 }
4742 }
4743
4744 return features;
4745}
4746EXPORT_SYMBOL(netdev_fix_features);
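/*
 * Example (sketch, not part of the original file): a driver or stacked
 * device sanitizing a candidate feature set before applying it, so that
 * illegal combinations such as TSO without SG are dropped with a log
 * message, just as register_netdevice() does below.
 *
 *	unsigned long wanted = dev->features | NETIF_F_TSO;
 *
 *	dev->features = netdev_fix_features(wanted, dev->name);
 */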
4747
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748/**
4749 * register_netdevice - register a network device
4750 * @dev: device to register
4751 *
4752 * Take a completed network device structure and add it to the kernel
4753 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4754 * chain. 0 is returned on success. A negative errno code is returned
4755 * on a failure to set up the device, or if the name is a duplicate.
4756 *
4757 * Callers must hold the rtnl semaphore. You may want
4758 * register_netdev() instead of this.
4759 *
4760 * BUGS:
4761 * The locking appears insufficient to guarantee two parallel registers
4762 * will not get the same name.
4763 */
4764
4765int register_netdevice(struct net_device *dev)
4766{
4767 struct hlist_head *head;
4768 struct hlist_node *p;
4769 int ret;
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004770 struct net *net = dev_net(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004771
4772 BUG_ON(dev_boot_phase);
4773 ASSERT_RTNL();
4774
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004775 might_sleep();
4776
Linus Torvalds1da177e2005-04-16 15:20:36 -07004777 /* When net_device's are persistent, this will be fatal. */
4778 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004779 BUG_ON(!net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004780
David S. Millerf1f28aa2008-07-15 00:08:33 -07004781 spin_lock_init(&dev->addr_list_lock);
David S. Millercf508b12008-07-22 14:16:42 -07004782 netdev_set_addr_lockdep_class(dev);
David S. Millerc773e842008-07-08 23:13:53 -07004783 netdev_init_queue_locks(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004784
Linus Torvalds1da177e2005-04-16 15:20:36 -07004785 dev->iflink = -1;
4786
4787 /* Init, if this function is available */
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004788 if (dev->netdev_ops->ndo_init) {
4789 ret = dev->netdev_ops->ndo_init(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004790 if (ret) {
4791 if (ret > 0)
4792 ret = -EIO;
Adrian Bunk90833aa2006-11-13 16:02:22 -08004793 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004794 }
4795 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004796
Linus Torvalds1da177e2005-04-16 15:20:36 -07004797 if (!dev_valid_name(dev->name)) {
4798 ret = -EINVAL;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004799 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004800 }
4801
Eric W. Biederman881d9662007-09-17 11:56:21 -07004802 dev->ifindex = dev_new_index(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004803 if (dev->iflink == -1)
4804 dev->iflink = dev->ifindex;
4805
4806 /* Check for existence of name */
Eric W. Biederman881d9662007-09-17 11:56:21 -07004807 head = dev_name_hash(net, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004808 hlist_for_each(p, head) {
4809 struct net_device *d
4810 = hlist_entry(p, struct net_device, name_hlist);
4811 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4812 ret = -EEXIST;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004813 goto err_uninit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004814 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004815 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004816
Stephen Hemmingerd212f872007-06-27 00:47:37 -07004817 /* Fix illegal checksum combinations */
4818 if ((dev->features & NETIF_F_HW_CSUM) &&
4819 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4820 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4821 dev->name);
4822 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4823 }
4824
4825 if ((dev->features & NETIF_F_NO_CSUM) &&
4826 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4827 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4828 dev->name);
4829 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4830 }
4831
Herbert Xub63365a2008-10-23 01:11:29 -07004832 dev->features = netdev_fix_features(dev->features, dev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004833
Lennert Buytenheke5a4a722008-08-03 01:23:10 -07004834 /* Enable software GSO if SG is supported. */
4835 if (dev->features & NETIF_F_SG)
4836 dev->features |= NETIF_F_GSO;
4837
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07004838 netdev_initialize_kobject(dev);
Johannes Berg7ffbe3f2009-10-02 05:15:27 +00004839
4840 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4841 ret = notifier_to_errno(ret);
4842 if (ret)
4843 goto err_uninit;
4844
Eric W. Biederman8b41d182007-09-26 22:02:53 -07004845 ret = netdev_register_kobject(dev);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004846 if (ret)
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004847 goto err_uninit;
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07004848 dev->reg_state = NETREG_REGISTERED;
4849
Linus Torvalds1da177e2005-04-16 15:20:36 -07004850 /*
4851 * Default initial state at registry is that the
4852 * device is present.
4853 */
4854
4855 set_bit(__LINK_STATE_PRESENT, &dev->state);
4856
Linus Torvalds1da177e2005-04-16 15:20:36 -07004857 dev_init_scheduler(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004858 dev_hold(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02004859 list_netdevice(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004860
4861 /* Notify protocols that a new device appeared. */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004862 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
Herbert Xufcc5a032007-07-30 17:03:38 -07004863 ret = notifier_to_errno(ret);
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07004864 if (ret) {
4865 rollback_registered(dev);
4866 dev->reg_state = NETREG_UNREGISTERED;
4867 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004868
4869out:
4870 return ret;
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004871
4872err_uninit:
Stephen Hemmingerd3147742008-11-19 21:32:24 -08004873 if (dev->netdev_ops->ndo_uninit)
4874 dev->netdev_ops->ndo_uninit(dev);
Herbert Xu7ce1b0e2007-07-30 16:29:40 -07004875 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004876}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07004877EXPORT_SYMBOL(register_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878
4879/**
Benjamin Herrenschmidt937f1ba2009-01-14 21:05:05 -08004880 * init_dummy_netdev - init a dummy network device for NAPI
4881 * @dev: device to init
4882 *
4883 * This takes a network device structure and initialize the minimum
4884 * amount of fields so it can be used to schedule NAPI polls without
4885 * registering a full blown interface. This is to be used by drivers
4886 * that need to tie several hardware interfaces to a single NAPI
4887 * poll scheduler due to HW limitations.
4888 */
4889int init_dummy_netdev(struct net_device *dev)
4890{
4891 /* Clear everything. Note we don't initialize spinlocks
4892 * as they aren't supposed to be taken by any of the
4893 * NAPI code and this dummy netdev is supposed to be
4894 * only ever used for NAPI polls
4895 */
4896 memset(dev, 0, sizeof(struct net_device));
4897
4898 /* make sure we BUG if trying to hit standard
4899 * register/unregister code path
4900 */
4901 dev->reg_state = NETREG_DUMMY;
4902
4903 /* initialize the ref count */
4904 atomic_set(&dev->refcnt, 1);
4905
4906 /* NAPI wants this */
4907 INIT_LIST_HEAD(&dev->napi_list);
4908
4909 /* a dummy interface is started by default */
4910 set_bit(__LINK_STATE_PRESENT, &dev->state);
4911 set_bit(__LINK_STATE_START, &dev->state);
4912
4913 return 0;
4914}
4915EXPORT_SYMBOL_GPL(init_dummy_netdev);
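/*
 * Example (sketch, not part of the original file): a driver that owns
 * several hardware queues but no extra net_device can hang its NAPI
 * context off a dummy netdev.  "adapter", its members and "my_poll",
 * as well as the weight of 64, are hypothetical.
 *
 *	init_dummy_netdev(&adapter->napi_dev);
 *	netif_napi_add(&adapter->napi_dev, &adapter->napi, my_poll, 64);
 *	napi_enable(&adapter->napi);
 */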
4916
4917
4918/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004919 * register_netdev - register a network device
4920 * @dev: device to register
4921 *
4922 * Take a completed network device structure and add it to the kernel
4923 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4924 * chain. 0 is returned on success. A negative errno code is returned
4925 * on a failure to set up the device, or if the name is a duplicate.
4926 *
Borislav Petkov38b4da32007-04-20 22:14:10 -07004927 * This is a wrapper around register_netdevice that takes the rtnl semaphore
Linus Torvalds1da177e2005-04-16 15:20:36 -07004928 * and expands the device name if you passed a format string to
4929 * alloc_netdev.
4930 */
4931int register_netdev(struct net_device *dev)
4932{
4933 int err;
4934
4935 rtnl_lock();
4936
4937 /*
4938 * If the name is a format string the caller wants us to do a
4939 * name allocation.
4940 */
4941 if (strchr(dev->name, '%')) {
4942 err = dev_alloc_name(dev, dev->name);
4943 if (err < 0)
4944 goto out;
4945 }
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004946
Linus Torvalds1da177e2005-04-16 15:20:36 -07004947 err = register_netdevice(dev);
4948out:
4949 rtnl_unlock();
4950 return err;
4951}
4952EXPORT_SYMBOL(register_netdev);
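/*
 * Typical driver probe usage (illustrative sketch, not part of the
 * original file).  "struct my_priv" and "my_setup" are hypothetical;
 * the "%d" in the name asks register_netdev() to pick a free unit
 * number via dev_alloc_name().
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d", my_setup, 1);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */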
4953
4954/*
4955 * netdev_wait_allrefs - wait until all references are gone.
4956 *
4957 * This is called when unregistering network devices.
4958 *
4959 * Any protocol or device that holds a reference should register
4960 * for netdevice notification, and cleanup and put back the
4961 * reference if they receive an UNREGISTER event.
4962 * We can get stuck here if buggy protocols don't correctly
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09004963 * call dev_put.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964 */
4965static void netdev_wait_allrefs(struct net_device *dev)
4966{
4967 unsigned long rebroadcast_time, warning_time;
4968
4969 rebroadcast_time = warning_time = jiffies;
4970 while (atomic_read(&dev->refcnt) != 0) {
4971 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004972 rtnl_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004973
4974 /* Rebroadcast unregister notification */
Pavel Emelyanov056925a2007-09-16 15:42:43 -07004975 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004976
4977 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4978 &dev->state)) {
4979 /* We must not have linkwatch events
4980 * pending on unregister. If this
4981 * happens, we simply run the queue
4982 * unscheduled, resulting in a noop
4983 * for this device.
4984 */
4985 linkwatch_run_queue();
4986 }
4987
Stephen Hemminger6756ae42006-03-20 22:23:58 -08004988 __rtnl_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004989
4990 rebroadcast_time = jiffies;
4991 }
4992
4993 msleep(250);
4994
4995 if (time_after(jiffies, warning_time + 10 * HZ)) {
4996 printk(KERN_EMERG "unregister_netdevice: "
4997 "waiting for %s to become free. Usage "
4998 "count = %d\n",
4999 dev->name, atomic_read(&dev->refcnt));
5000 warning_time = jiffies;
5001 }
5002 }
5003}
5004
5005/* The sequence is:
5006 *
5007 * rtnl_lock();
5008 * ...
5009 * register_netdevice(x1);
5010 * register_netdevice(x2);
5011 * ...
5012 * unregister_netdevice(y1);
5013 * unregister_netdevice(y2);
5014 * ...
5015 * rtnl_unlock();
5016 * free_netdev(y1);
5017 * free_netdev(y2);
5018 *
Herbert Xu58ec3b42008-10-07 15:50:03 -07005019 * We are invoked by rtnl_unlock().
Linus Torvalds1da177e2005-04-16 15:20:36 -07005020 * This allows us to deal with problems:
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005021 * 1) We can delete sysfs objects which invoke hotplug
Linus Torvalds1da177e2005-04-16 15:20:36 -07005022 * without deadlocking with linkwatch via keventd.
5023 * 2) Since we run with the RTNL semaphore not held, we can sleep
5024 * safely in order to wait for the netdev refcnt to drop to zero.
Herbert Xu58ec3b42008-10-07 15:50:03 -07005025 *
5026 * We must not return until all unregister events added during
5027 * the interval the lock was held have been completed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005029void netdev_run_todo(void)
5030{
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005031 struct list_head list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033 /* Snapshot list, allow later requests */
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005034 list_replace_init(&net_todo_list, &list);
Herbert Xu58ec3b42008-10-07 15:50:03 -07005035
5036 __rtnl_unlock();
Oleg Nesterov626ab0e2006-06-23 02:05:55 -07005037
Linus Torvalds1da177e2005-04-16 15:20:36 -07005038 while (!list_empty(&list)) {
5039 struct net_device *dev
5040 = list_entry(list.next, struct net_device, todo_list);
5041 list_del(&dev->todo_list);
5042
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005043 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005044 printk(KERN_ERR "network todo '%s' but state %d\n",
5045 dev->name, dev->reg_state);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005046 dump_stack();
5047 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005048 }
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005049
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005050 dev->reg_state = NETREG_UNREGISTERED;
5051
Stephen Hemminger6e583ce2008-08-03 21:29:57 -07005052 on_each_cpu(flush_backlog, dev, 1);
5053
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005054 netdev_wait_allrefs(dev);
5055
5056 /* paranoia */
5057 BUG_ON(atomic_read(&dev->refcnt));
Ilpo Järvinen547b7922008-07-25 21:43:18 -07005058 WARN_ON(dev->ip_ptr);
5059 WARN_ON(dev->ip6_ptr);
5060 WARN_ON(dev->dn_ptr);
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005061
Stephen Hemmingerb17a7c12006-05-10 13:21:17 -07005062 if (dev->destructor)
5063 dev->destructor(dev);
Stephen Hemminger9093bbb2007-05-19 15:39:25 -07005064
5065 /* Free network device */
5066 kobject_put(&dev->dev.kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068}
5069
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005070/**
5071 * dev_get_stats - get network device statistics
5072 * @dev: device to get statistics from
5073 *
5074 * Get network statistics from device. The device driver may provide
5075 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5076 * the internal statistics structure is used.
5077 */
5078const struct net_device_stats *dev_get_stats(struct net_device *dev)
Eric Dumazet7004bf22009-05-18 00:34:33 +00005079{
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005080 const struct net_device_ops *ops = dev->netdev_ops;
5081
5082 if (ops->ndo_get_stats)
5083 return ops->ndo_get_stats(dev);
Eric Dumazet7004bf22009-05-18 00:34:33 +00005084 else {
5085 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5086 struct net_device_stats *stats = &dev->stats;
5087 unsigned int i;
5088 struct netdev_queue *txq;
5089
5090 for (i = 0; i < dev->num_tx_queues; i++) {
5091 txq = netdev_get_tx_queue(dev, i);
5092 tx_bytes += txq->tx_bytes;
5093 tx_packets += txq->tx_packets;
5094 tx_dropped += txq->tx_dropped;
5095 }
5096 if (tx_bytes || tx_packets || tx_dropped) {
5097 stats->tx_bytes = tx_bytes;
5098 stats->tx_packets = tx_packets;
5099 stats->tx_dropped = tx_dropped;
5100 }
5101 return stats;
5102 }
Rusty Russellc45d2862007-03-28 14:29:08 -07005103}
Stephen Hemmingereeda3fd2008-11-19 21:40:23 -08005104EXPORT_SYMBOL(dev_get_stats);
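/*
 * Example (sketch, not part of the original file): sampling a device's
 * counters through the common accessor instead of reading dev->stats
 * directly.
 *
 *	const struct net_device_stats *stats = dev_get_stats(dev);
 *
 *	printk(KERN_DEBUG "%s: %lu rx, %lu tx packets\n",
 *	       dev->name, stats->rx_packets, stats->tx_packets);
 */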
Rusty Russellc45d2862007-03-28 14:29:08 -07005105
David S. Millerdc2b4842008-07-08 17:18:23 -07005106static void netdev_init_one_queue(struct net_device *dev,
David S. Millere8a04642008-07-17 00:34:19 -07005107 struct netdev_queue *queue,
5108 void *_unused)
David S. Millerdc2b4842008-07-08 17:18:23 -07005109{
David S. Millerdc2b4842008-07-08 17:18:23 -07005110 queue->dev = dev;
5111}
5112
David S. Millerbb949fb2008-07-08 16:55:56 -07005113static void netdev_init_queues(struct net_device *dev)
5114{
David S. Millere8a04642008-07-17 00:34:19 -07005115 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5116 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
David S. Millerc3f26a22008-07-31 16:58:50 -07005117 spin_lock_init(&dev->tx_global_lock);
David S. Millerbb949fb2008-07-08 16:55:56 -07005118}
5119
Linus Torvalds1da177e2005-04-16 15:20:36 -07005120/**
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005121 * alloc_netdev_mq - allocate network device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005122 * @sizeof_priv: size of private data to allocate space for
5123 * @name: device name format string
5124 * @setup: callback to initialize device
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005125 * @queue_count: the number of subqueues to allocate
Linus Torvalds1da177e2005-04-16 15:20:36 -07005126 *
5127 * Allocates a struct net_device with private data area for driver use
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005128 * and performs basic initialization. Also allocates subqueue structs
5129 * for each queue on the device at the end of the netdevice.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005130 */
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005131struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5132 void (*setup)(struct net_device *), unsigned int queue_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133{
David S. Millere8a04642008-07-17 00:34:19 -07005134 struct netdev_queue *tx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135 struct net_device *dev;
Stephen Hemminger79439862008-07-21 13:28:44 -07005136 size_t alloc_size;
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005137 struct net_device *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005138
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005139 BUG_ON(strlen(name) >= sizeof(dev->name));
5140
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005141 alloc_size = sizeof(struct net_device);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005142 if (sizeof_priv) {
5143 /* ensure 32-byte alignment of private area */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005144 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
Alexey Dobriyand1643d22008-04-18 15:43:32 -07005145 alloc_size += sizeof_priv;
5146 }
5147 /* ensure 32-byte alignment of whole construct */
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005148 alloc_size += NETDEV_ALIGN - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005149
Paolo 'Blaisorblade' Giarrusso31380de2006-04-06 22:38:28 -07005150 p = kzalloc(alloc_size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151 if (!p) {
Stephen Hemmingerb6fe17d2006-08-29 17:06:13 -07005152 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005153 return NULL;
5154 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005155
Stephen Hemminger79439862008-07-21 13:28:44 -07005156 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
David S. Millere8a04642008-07-17 00:34:19 -07005157 if (!tx) {
5158 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5159 "tx qdiscs.\n");
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005160 goto free_p;
David S. Millere8a04642008-07-17 00:34:19 -07005161 }
5162
Eric Dumazet1ce8e7b2009-05-27 04:42:37 +00005163 dev = PTR_ALIGN(p, NETDEV_ALIGN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164 dev->padded = (char *)dev - (char *)p;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005165
5166 if (dev_addr_init(dev))
5167 goto free_tx;
5168
Jiri Pirkoccffad252009-05-22 23:22:17 +00005169 dev_unicast_init(dev);
5170
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005171 dev_net_set(dev, &init_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172
David S. Millere8a04642008-07-17 00:34:19 -07005173 dev->_tx = tx;
5174 dev->num_tx_queues = queue_count;
David S. Millerfd2ea0a2008-07-17 01:56:23 -07005175 dev->real_num_tx_queues = queue_count;
David S. Millere8a04642008-07-17 00:34:19 -07005176
Peter P Waskiewicz Jr82cc1a72008-03-21 03:43:19 -07005177 dev->gso_max_size = GSO_MAX_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178
David S. Millerbb949fb2008-07-08 16:55:56 -07005179 netdev_init_queues(dev);
5180
Herbert Xud565b0a2008-12-15 23:38:52 -08005181 INIT_LIST_HEAD(&dev->napi_list);
Eric Dumazet93f154b2009-05-18 22:19:19 -07005182 dev->priv_flags = IFF_XMIT_DST_RELEASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183 setup(dev);
5184 strcpy(dev->name, name);
5185 return dev;
Jiri Pirkoab9c73c2009-05-08 13:30:17 +00005186
5187free_tx:
5188 kfree(tx);
5189
5190free_p:
5191 kfree(p);
5192 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005193}
Peter P Waskiewicz Jrf25f4e42007-07-06 13:36:20 -07005194EXPORT_SYMBOL(alloc_netdev_mq);
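/*
 * Example (sketch, not part of the original file): allocating an
 * Ethernet-style device with four transmit queues and a private area.
 * ether_setup() is the usual setup callback for Ethernet devices;
 * "struct my_priv" is hypothetical.
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d",
 *			      ether_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */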
Linus Torvalds1da177e2005-04-16 15:20:36 -07005195
5196/**
5197 * free_netdev - free network device
5198 * @dev: device
5199 *
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005200 * This function does the last stage of destroying an allocated device
5201 * interface. The reference to the device object is released.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202 * If this is the last reference then it will be freed.
5203 */
5204void free_netdev(struct net_device *dev)
5205{
Herbert Xud565b0a2008-12-15 23:38:52 -08005206 struct napi_struct *p, *n;
5207
Denis V. Lunevf3005d72008-04-16 02:02:18 -07005208 release_net(dev_net(dev));
5209
David S. Millere8a04642008-07-17 00:34:19 -07005210 kfree(dev->_tx);
5211
Jiri Pirkof001fde2009-05-05 02:48:28 +00005212 /* Flush device addresses */
5213 dev_addr_flush(dev);
5214
Herbert Xud565b0a2008-12-15 23:38:52 -08005215 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5216 netif_napi_del(p);
5217
Stephen Hemminger3041a062006-05-26 13:25:24 -07005218 /* Compatibility with error handling in drivers */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005219 if (dev->reg_state == NETREG_UNINITIALIZED) {
5220 kfree((char *)dev - dev->padded);
5221 return;
5222 }
5223
5224 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5225 dev->reg_state = NETREG_RELEASED;
5226
Greg Kroah-Hartman43cb76d2002-04-09 12:14:34 -07005227 /* will free via device release */
5228 put_device(&dev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005230EXPORT_SYMBOL(free_netdev);
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005231
Stephen Hemmingerf0db2752008-09-30 02:23:58 -07005232/**
5233 * synchronize_net - Synchronize with packet receive processing
5234 *
5235 * Wait for packets currently being received to be done.
5236 * Does not block later packets from starting.
5237 */
YOSHIFUJI Hideaki4ec93ed2007-02-09 23:24:36 +09005238void synchronize_net(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239{
5240 might_sleep();
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005241 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005243EXPORT_SYMBOL(synchronize_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244
5245/**
5246 * unregister_netdevice - remove device from the kernel
5247 * @dev: device
5248 *
5249 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005250 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005251 *
5252 * Callers must hold the rtnl semaphore. You may want
5253 * unregister_netdev() instead of this.
5254 */
5255
Stephen Hemminger22f8cde2007-02-07 00:09:58 -08005256void unregister_netdevice(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005257{
Herbert Xua6620712007-12-12 19:21:56 -08005258 ASSERT_RTNL();
5259
Daniel Lezcano93ee31f2007-10-30 15:38:18 -07005260 rollback_registered(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005261 /* Finish processing unregister after unlock */
5262 net_set_todo(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263}
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005264EXPORT_SYMBOL(unregister_netdevice);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005265
5266/**
5267 * unregister_netdev - remove device from the kernel
5268 * @dev: device
5269 *
5270 * This function shuts down a device interface and removes it
Wang Chend59b54b2007-12-11 02:28:03 -08005271 * from the kernel tables.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005272 *
5273 * This is just a wrapper for unregister_netdevice that takes
5274 * the rtnl semaphore. In general you want to use this and not
5275 * unregister_netdevice.
5276 */
5277void unregister_netdev(struct net_device *dev)
5278{
5279 rtnl_lock();
5280 unregister_netdevice(dev);
5281 rtnl_unlock();
5282}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005283EXPORT_SYMBOL(unregister_netdev);
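/*
 * Typical teardown order in a driver's remove path (illustrative
 * sketch, not part of the original file): unregister first, which
 * waits for outstanding references via the todo machinery above,
 * then release the memory.
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */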
5284
Eric W. Biedermance286d32007-09-12 13:53:49 +02005285/**
5286 * dev_change_net_namespace - move device to a different network namespace
5287 * @dev: device
5288 * @net: network namespace
5289 * @pat: If not NULL name pattern to try if the current device name
5290 * is already taken in the destination network namespace.
5291 *
5292 * This function shuts down a device interface and moves it
5293 * to a new network namespace. On success 0 is returned, on
5294 * a failure a negative errno code is returned.
5295 *
5296 * Callers must hold the rtnl semaphore.
5297 */
5298
5299int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5300{
5301 char buf[IFNAMSIZ];
5302 const char *destname;
5303 int err;
5304
5305 ASSERT_RTNL();
5306
5307 /* Don't allow namespace local devices to be moved. */
5308 err = -EINVAL;
5309 if (dev->features & NETIF_F_NETNS_LOCAL)
5310 goto out;
5311
Eric W. Biederman38918452008-10-27 17:51:47 -07005312#ifdef CONFIG_SYSFS
5313 /* Don't allow real devices to be moved when sysfs
5314 * is enabled.
5315 */
5316 err = -EINVAL;
5317 if (dev->dev.parent)
5318 goto out;
5319#endif
5320
Eric W. Biedermance286d32007-09-12 13:53:49 +02005321	/* Ensure the device has been registered */
5322 err = -EINVAL;
5323 if (dev->reg_state != NETREG_REGISTERED)
5324 goto out;
5325
5326 /* Get out if there is nothing to do */
5327 err = 0;
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09005328 if (net_eq(dev_net(dev), net))
Eric W. Biedermance286d32007-09-12 13:53:49 +02005329 goto out;
5330
5331 /* Pick the destination device name, and ensure
5332 * we can use it in the destination network namespace.
5333 */
5334 err = -EEXIST;
5335 destname = dev->name;
5336 if (__dev_get_by_name(net, destname)) {
5337 /* We get here if we can't use the current device name */
5338 if (!pat)
5339 goto out;
5340 if (!dev_valid_name(pat))
5341 goto out;
5342 if (strchr(pat, '%')) {
5343 if (__dev_alloc_name(net, pat, buf) < 0)
5344 goto out;
5345 destname = buf;
5346 } else
5347 destname = pat;
5348 if (__dev_get_by_name(net, destname))
5349 goto out;
5350 }
5351
5352 /*
5353 * And now a mini version of register_netdevice unregister_netdevice.
5354 */
5355
5356 /* If device is running close it first. */
Pavel Emelyanov9b772652007-10-10 02:49:09 -07005357 dev_close(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005358
5359 /* And unlink it from device chain */
5360 err = -ENODEV;
5361 unlist_netdevice(dev);
5362
5363 synchronize_net();
5364
5365 /* Shutdown queueing discipline. */
5366 dev_shutdown(dev);
5367
5368 /* Notify protocols that we are about to destroy
5369 this device. They should clean all the things.
5370 */
5371 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5372
5373 /*
5374 * Flush the unicast and multicast chains
5375 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00005376 dev_unicast_flush(dev);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005377 dev_addr_discard(dev);
5378
Eric W. Biederman38918452008-10-27 17:51:47 -07005379 netdev_unregister_kobject(dev);
5380
Eric W. Biedermance286d32007-09-12 13:53:49 +02005381 /* Actually switch the network namespace */
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005382 dev_net_set(dev, net);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005383
5384 /* Assign the new device name */
5385 if (destname != dev->name)
5386 strcpy(dev->name, destname);
5387
5388 /* If there is an ifindex conflict assign a new one */
5389 if (__dev_get_by_index(net, dev->ifindex)) {
5390 int iflink = (dev->iflink == dev->ifindex);
5391 dev->ifindex = dev_new_index(net);
5392 if (iflink)
5393 dev->iflink = dev->ifindex;
5394 }
5395
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005396 /* Fixup kobjects */
Daniel Lezcanoaaf8cdc2008-05-02 17:00:58 -07005397 err = netdev_register_kobject(dev);
Eric W. Biederman8b41d182007-09-26 22:02:53 -07005398 WARN_ON(err);
Eric W. Biedermance286d32007-09-12 13:53:49 +02005399
5400 /* Add the device back in the hashes */
5401 list_netdevice(dev);
5402
5403 /* Notify protocols that a new device appeared. */
5404 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5405
5406 synchronize_net();
5407 err = 0;
5408out:
5409 return err;
5410}
Johannes Berg463d0182009-07-14 00:33:35 +02005411EXPORT_SYMBOL_GPL(dev_change_net_namespace);
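/*
 * Example (sketch, not part of the original file): moving a device into
 * another namespace under the rtnl lock, falling back to an "eth%d"
 * style name if the current one is already taken there.  "target_net"
 * is a hypothetical struct net pointer held by the caller.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	rtnl_unlock();
 */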
Eric W. Biedermance286d32007-09-12 13:53:49 +02005412
Linus Torvalds1da177e2005-04-16 15:20:36 -07005413static int dev_cpu_callback(struct notifier_block *nfb,
5414 unsigned long action,
5415 void *ocpu)
5416{
5417 struct sk_buff **list_skb;
David S. Miller37437bb2008-07-16 02:15:04 -07005418 struct Qdisc **list_net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419 struct sk_buff *skb;
5420 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5421 struct softnet_data *sd, *oldsd;
5422
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07005423 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424 return NOTIFY_OK;
5425
5426 local_irq_disable();
5427 cpu = smp_processor_id();
5428 sd = &per_cpu(softnet_data, cpu);
5429 oldsd = &per_cpu(softnet_data, oldcpu);
5430
5431 /* Find end of our completion_queue. */
5432 list_skb = &sd->completion_queue;
5433 while (*list_skb)
5434 list_skb = &(*list_skb)->next;
5435 /* Append completion queue from offline CPU. */
5436 *list_skb = oldsd->completion_queue;
5437 oldsd->completion_queue = NULL;
5438
5439 /* Find end of our output_queue. */
5440 list_net = &sd->output_queue;
5441 while (*list_net)
5442 list_net = &(*list_net)->next_sched;
5443 /* Append output queue from offline CPU. */
5444 *list_net = oldsd->output_queue;
5445 oldsd->output_queue = NULL;
5446
5447 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5448 local_irq_enable();
5449
5450 /* Process offline CPU's input_pkt_queue */
5451 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5452 netif_rx(skb);
5453
5454 return NOTIFY_OK;
5455}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005456
5457
Herbert Xu7f353bf2007-08-10 15:47:58 -07005458/**
Herbert Xub63365a2008-10-23 01:11:29 -07005459 * netdev_increment_features - increment feature set by one
5460 * @all: current feature set
5461 * @one: new feature set
5462 * @mask: mask feature set
Herbert Xu7f353bf2007-08-10 15:47:58 -07005463 *
5464 * Computes a new feature set after adding a device with feature set
Herbert Xub63365a2008-10-23 01:11:29 -07005465 * @one to the master device with current feature set @all. Will not
5466 * enable anything that is off in @mask. Returns the new feature set.
Herbert Xu7f353bf2007-08-10 15:47:58 -07005467 */
Herbert Xub63365a2008-10-23 01:11:29 -07005468unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5469 unsigned long mask)
Herbert Xu7f353bf2007-08-10 15:47:58 -07005470{
Herbert Xub63365a2008-10-23 01:11:29 -07005471 /* If device needs checksumming, downgrade to it. */
Eric Dumazetd1b19df2009-09-03 01:29:39 -07005472 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
Herbert Xub63365a2008-10-23 01:11:29 -07005473 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5474 else if (mask & NETIF_F_ALL_CSUM) {
5475 /* If one device supports v4/v6 checksumming, set for all. */
5476 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5477 !(all & NETIF_F_GEN_CSUM)) {
5478 all &= ~NETIF_F_ALL_CSUM;
5479 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5480 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005481
Herbert Xub63365a2008-10-23 01:11:29 -07005482 /* If one device supports hw checksumming, set for all. */
5483 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5484 all &= ~NETIF_F_ALL_CSUM;
5485 all |= NETIF_F_HW_CSUM;
5486 }
5487 }
Herbert Xu7f353bf2007-08-10 15:47:58 -07005488
Herbert Xub63365a2008-10-23 01:11:29 -07005489 one |= NETIF_F_ALL_CSUM;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005490
Herbert Xub63365a2008-10-23 01:11:29 -07005491 one |= all & NETIF_F_ONE_FOR_ALL;
Sridhar Samudralad9f59502009-10-07 12:24:25 +00005492 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
Herbert Xub63365a2008-10-23 01:11:29 -07005493 all |= one & mask & NETIF_F_ONE_FOR_ALL;
Herbert Xu7f353bf2007-08-10 15:47:58 -07005494
5495 return all;
5496}
Herbert Xub63365a2008-10-23 01:11:29 -07005497EXPORT_SYMBOL(netdev_increment_features);
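/*
 * Example (sketch, not part of the original file): how a master device
 * such as a bond or bridge might fold each lower device's feature word
 * into its own set, which is the intended use of this helper.  The
 * "master_dev", "slaves" list and "slave" entries are hypothetical.
 *
 *	unsigned long features = master_dev->vlan_features & ~NETIF_F_ONE_FOR_ALL;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     NETIF_F_ONE_FOR_ALL);
 */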
Herbert Xu7f353bf2007-08-10 15:47:58 -07005498
Pavel Emelyanov30d97d32007-09-16 15:40:33 -07005499static struct hlist_head *netdev_create_hash(void)
5500{
5501 int i;
5502 struct hlist_head *hash;
5503
5504 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5505 if (hash != NULL)
5506 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5507 INIT_HLIST_HEAD(&hash[i]);
5508
5509 return hash;
5510}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 * @buffer: buffer for resulting name
 * @len: size of buffer
 *
 * Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
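
/*
 * Illustrative sketch, not part of the original file: the typical caller
 * pattern for netdev_drivername(), modelled on the transmit watchdog in
 * net/sched/sch_generic.c. The function name, message text and the
 * 64-byte buffer are this sketch's choices, not requirements of the API.
 */
static inline void example_report_tx_timeout(struct net_device *dev)
{
	char drivername[64];

	/* netdev_drivername() always returns @buffer, so it nests in printk. */
	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
	       dev->name, netdev_drivername(dev, drivername, 64));
}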

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
restart:
	for_each_netdev(net, dev) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Delete virtual devices */
		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
			dev->rtnl_link_ops->dellink(dev);
			goto restart;
		}

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
				__func__, dev->name, err);
			BUG();
		}
		goto restart;
	}
	rtnl_unlock();
}
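
/*
 * Illustrative sketch, not part of the original file: how a driver keeps
 * its device out of the migration above. Setting NETIF_F_NETNS_LOCAL in
 * the setup routine marks the device namespace-local, as the loopback
 * driver does; the setup function name here is hypothetical.
 */
static inline void example_netns_local_setup(struct net_device *dev)
{
	/* default_device_exit() skips devices carrying this flag. */
	dev->features |= NETIF_F_NETNS_LOCAL;
}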

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
		queue->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, maintain this invariant by keeping the
	 * loopback device as the first device on the list of network
	 * devices: it is the first device to appear and the last
	 * network device to disappear.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);
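
/*
 * Illustrative sketch, not part of the original file: skb_tx_hashrnd seeds
 * the per-boot flow hash used to spread traffic across transmit queues. The
 * body below is a simplified rendering of the non-recorded-queue path of
 * skb_tx_hash() from earlier in this file; treat it as an approximation,
 * not the authoritative implementation.
 */
static inline u16 example_pick_tx_queue(const struct net_device *dev,
					const struct sk_buff *skb)
{
	/* Mix a flow identifier with the boot-time random seed... */
	u32 hash = jhash_1word(skb->protocol, skb_tx_hashrnd);

	/* ...then scale the result into [0, real_num_tx_queues). */
	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}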