/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <net/wext.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctls */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = (*format)(net, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *net, char *buf)	\
{									\
	return sprintf(buf, format_string, net->field);			\
}									\
static ssize_t show_##field(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}
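
/*
 * For example, NETDEVICE_SHOW(mtu, fmt_dec) expands to format_mtu(),
 * which prints net->mtu with "%d\n", and show_mtu(), which routes the
 * read through netdev_show() so it happens under dev_base_lock and
 * only for a device that is still alive.
 */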

/* use same locking and permission rules as SIF* ioctls */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

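	/*
	 * rtnl_trylock() + restart_syscall() is used instead of rtnl_lock():
	 * if the RTNL is held (e.g. by an unregister that is waiting for
	 * this sysfs file's active users to drain), sleeping here could
	 * deadlock, so the write(2) is transparently restarted instead.
	 */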
	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		if ((ret = (*set)(netdev, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

NETDEVICE_SHOW(dev_id, fmt_hex);
NETDEVICE_SHOW(addr_assign_type, fmt_dec);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);

/* use same locking rules as GIFHWADDR ioctl */
static ssize_t show_address(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}

static ssize_t show_broadcast(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *net = to_net_dev(dev);
	if (dev_isalive(net))
		return sysfs_format_mac(buf, net->broadcast, net->addr_len);
	return -EINVAL;
}

static ssize_t show_carrier(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	return -EINVAL;
}

static ssize_t show_speed(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}

static ssize_t show_duplex(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd)) {
			const char *duplex;
			switch (cmd.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}

static ssize_t show_dormant(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}

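/*
 * Strings indexed by the IF_OPER_* values; these mirror the RFC 2863
 * (IF-MIB) operational states that the kernel tracks in dev->operstate.
 */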
static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};

static ssize_t show_operstate(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}

/* read-write attributes */
NETDEVICE_SHOW(mtu, fmt_dec);

static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
	return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}

NETDEVICE_SHOW(flags, fmt_hex);

static int change_flags(struct net_device *net, unsigned long new_flags)
{
	return dev_change_flags(net, (unsigned int) new_flags);
}

static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}

NETDEVICE_SHOW(tx_queue_len, fmt_ulong);

static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
	net->tx_queue_len = new_len;
	return 0;
}

static ssize_t store_tx_queue_len(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}

static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}

static ssize_t show_ifalias(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}

NETDEVICE_SHOW(group, fmt_dec);

static int change_group(struct net_device *net, unsigned long new_group)
{
	dev_set_group(net, (int) new_group);
	return 0;
}

static ssize_t store_group(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}

static struct device_attribute net_class_attributes[] = {
	__ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
	__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
	__ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
	__ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
	__ATTR(iflink, S_IRUGO, show_iflink, NULL),
	__ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
	__ATTR(type, S_IRUGO, show_type, NULL),
	__ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
	__ATTR(address, S_IRUGO, show_address, NULL),
	__ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
	__ATTR(carrier, S_IRUGO, show_carrier, NULL),
	__ATTR(speed, S_IRUGO, show_speed, NULL),
	__ATTR(duplex, S_IRUGO, show_duplex, NULL),
	__ATTR(dormant, S_IRUGO, show_dormant, NULL),
	__ATTR(operstate, S_IRUGO, show_operstate, NULL),
	__ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
	__ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
	__ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
	       store_tx_queue_len),
	__ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
	{}
};

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t show_##name(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

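/*
 * Each entry resolves its field with offsetof() and reads it as a u64,
 * so e.g. NETSTAT_ENTRY(rx_packets) becomes a show_rx_packets() that
 * prints the rx_packets counter from the device's rtnl_link_stats64.
 */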
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_RPS
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,		\
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		rcu_read_unlock();
		free_cpumask_var(mask);
		return -EINVAL;
	}
	rcu_read_unlock();

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
	    GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
	}

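	/*
	 * Publish the new map under rps_map_lock; readers are lockless, so
	 * the old map is freed only after an RCU grace period, and the
	 * rps_needed static key is adjusted to match.
	 */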
	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);
	return len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release_work(struct work_struct *work)
{
	struct rps_dev_flow_table *table = container_of(work,
	    struct rps_dev_flow_table, free_work);

	vfree(table);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);

	INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
	schedule_work(&table->free_work);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64-bit arches, must check mask fits in table->mask (u32),
		 * and on 32-bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else
		table = NULL;

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);
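
/*
 * Exposed as /sys/class/net/<dev>/queues/rx-<n>/rps_cpus; e.g. writing
 * "f" steers that queue's RPS processing to CPUs 0-3 (the value is a
 * hex CPU bitmap, as parsed by bitmap_parse() above).
 */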

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	    show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);

static struct attribute *rx_queue_default_attrs[] = {
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
	NULL
};

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
};

static int rx_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_rx_queue *queue = net->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
	    "rx-%u", index);
	if (error) {
		kobject_put(kobj);
		return error;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
}
#endif /* CONFIG_RPS */

int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_RPS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num)
		kobject_put(&net->_rx[i].kobj);

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
		value = DQL_MAX_LIMIT;
	else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

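/*
 * bql_set() backs the limit, limit_max and limit_min files under
 * /sys/class/net/<dev>/queues/tx-<n>/byte_queue_limits/: writing "max"
 * selects DQL_MAX_LIMIT, anything else is parsed as a decimal byte count.
 */
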
static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	    bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
	    bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static struct attribute_group dql_group = {
	.name = "byte_queue_limits",
	.attrs = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		if (queue == &dev->_tx[i])
			break;

	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;
				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		free_cpumask_var(mask);
		return -EINVAL;
	}

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}

static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

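/*
 * Writers of dev->xps_maps serialize on xps_map_mutex; xmap_dereference()
 * states that assumption for lockdep, while readers such as show_xps_map()
 * use plain rcu_dereference() under rcu_read_lock().
 */
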
static void xps_queue_release(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	unsigned long index;
	int i, pos, nonempty = 0;

	index = get_netdev_queue_index(queue);

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (dev_maps) {
		for_each_possible_cpu(i) {
			map = xmap_dereference(dev_maps->cpu_map[i]);
			if (!map)
				continue;

			for (pos = 0; pos < map->len; pos++)
				if (map->queues[pos] == index)
					break;

			if (pos < map->len) {
				if (map->len > 1)
					map->queues[pos] =
					    map->queues[--map->len];
				else {
					RCU_INIT_POINTER(dev_maps->cpu_map[i],
					    NULL);
					kfree_rcu(map, rcu);
					map = NULL;
				}
			}
			if (map)
				nonempty = 1;
		}

		if (!nonempty) {
			RCU_INIT_POINTER(dev->xps_maps, NULL);
			kfree_rcu(dev_maps, rcu);
		}
	}
	mutex_unlock(&xps_map_mutex);
}

static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	cpumask_var_t mask;
	int err, i, cpu, pos, map_len, alloc_len, need_set;
	unsigned long index;
	struct xps_map *map, *new_map;
	struct xps_dev_maps *dev_maps, *new_dev_maps;
	int nonempty = 0;
	int numa_node_id = -2;
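	/* numa_node_id: -2 means no CPU has been picked yet, -1 means the
	 * chosen CPUs span more than one NUMA node; a node is written to
	 * the queue below only if the value stayed >= 0, otherwise
	 * NUMA_NO_NODE is used.
	 */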

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	new_dev_maps = kzalloc(max_t(unsigned int,
	    XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
	if (!new_dev_maps) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	for_each_possible_cpu(cpu) {
		map = dev_maps ?
			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
		new_map = map;
		if (map) {
			for (pos = 0; pos < map->len; pos++)
				if (map->queues[pos] == index)
					break;
			map_len = map->len;
			alloc_len = map->alloc_len;
		} else
			pos = map_len = alloc_len = 0;

		need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
#ifdef CONFIG_NUMA
		if (need_set) {
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
		}
#endif
		if (need_set && pos >= map_len) {
			/* Need to add queue to this CPU's map */
			if (map_len >= alloc_len) {
				alloc_len = alloc_len ?
				    2 * alloc_len : XPS_MIN_MAP_ALLOC;
				new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
						       GFP_KERNEL,
						       cpu_to_node(cpu));
				if (!new_map)
					goto error;
				new_map->alloc_len = alloc_len;
				for (i = 0; i < map_len; i++)
					new_map->queues[i] = map->queues[i];
				new_map->len = map_len;
			}
			new_map->queues[new_map->len++] = index;
		} else if (!need_set && pos < map_len) {
			/* Need to remove queue from this CPU's map */
			if (map_len > 1)
				new_map->queues[pos] =
				    new_map->queues[--new_map->len];
			else
				new_map = NULL;
		}
		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
	}

	/* Cleanup old maps */
	for_each_possible_cpu(cpu) {
		map = dev_maps ?
			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
		if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
			kfree_rcu(map, rcu);
		if (new_dev_maps->cpu_map[cpu])
			nonempty = 1;
	}

	if (nonempty) {
		rcu_assign_pointer(dev->xps_maps, new_dev_maps);
	} else {
		kfree(new_dev_maps);
		RCU_INIT_POINTER(dev->xps_maps, NULL);
	}

	if (dev_maps)
		kfree_rcu(dev_maps, rcu);

	netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
					    NUMA_NO_NODE);

	mutex_unlock(&xps_map_mutex);

	free_cpumask_var(mask);
	return len;

error:
	mutex_unlock(&xps_map_mutex);

	if (new_dev_maps)
		for_each_possible_cpu(i)
			kfree(rcu_dereference_protected(
				new_dev_maps->cpu_map[i],
				1));
	kfree(new_dev_maps);
	free_cpumask_var(mask);
	return -ENOMEM;
}

static struct netdev_queue_attribute xps_cpus_attribute =
	__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
#endif
	NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

#ifdef CONFIG_XPS
	xps_queue_release(queue);
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
};

static int netdev_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_queue *queue = net->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
	    "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = net->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

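/*
 * register_queue_kobjects() populates the per-device "queues" kset, so a
 * device ends up with e.g. /sys/class/net/<dev>/queues/rx-0/rps_cpus and
 * /sys/class/net/<dev>/queues/tx-0/{tx_timeout,xps_cpus,byte_queue_limits/}.
 */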
static int register_queue_kobjects(struct net_device *net)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	net->queues_kset = kset_create_and_add("queues",
	    NULL, &net->dev.kobj);
	if (!net->queues_kset)
		return -ENOMEM;
#endif

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(net, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(net, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(net, txq, 0);
	net_rx_queue_update_kobjects(net, rxq, 0);
	return error;
}

static void remove_queue_kobjects(struct net_device *net)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	net_rx_queue_update_kobjects(net, real_rx, 0);
	netdev_queue_update_kobjects(net, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(net->queues_kset);
#endif
}

static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

#ifdef CONFIG_HOTPLUG
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what rtnetlink uses natively. */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}
#endif

/*
 * netdev_release -- destroy and free a dead device.
 * Called when the last reference to the device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	kfree((char *)dev - dev->padded);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev;
	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}

static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
#ifdef CONFIG_SYSFS
	.dev_attrs = net_class_attributes,
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_HOTPLUG
	.dev_uevent = netdev_uevent,
#endif
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(net);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);
	const struct attribute_group **groups = net->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = net;
	dev->groups = groups;

	dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(net);
	if (error) {
		device_del(dev);
		return error;
	}

	return error;
}

int netdev_class_create_file(struct class_attribute *class_attr)
{
	return class_create_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_create_file);

void netdev_class_remove_file(struct class_attribute *class_attr)
{
	class_remove_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_remove_file);

int netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}